// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"
#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)
#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0
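
/*
 * Parse one self-ID sequence (a base packet plus any extended packets)
 * starting at sid.  Count how many ports the node reports in total and
 * how many of them are child ports.  Returns a pointer to the first
 * quadlet after this sequence, or NULL if the extended packets are
 * malformed.
 */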
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
        u32 q;
        int port_type, shift, seq;

        *total_port_count = 0;
        *child_port_count = 0;

        shift = 6;
        q = *sid;
        seq = 0;

        while (1) {
                port_type = (q >> shift) & 0x03;
                switch (port_type) {
                case SELFID_PORT_CHILD:
                        (*child_port_count)++;
                        fallthrough;
                case SELFID_PORT_PARENT:
                case SELFID_PORT_NCONN:
                        (*total_port_count)++;
                        fallthrough;
                case SELFID_PORT_NONE:
                        break;
                }

                shift -= 2;
                if (shift == 0) {
                        if (!SELF_ID_MORE_PACKETS(q))
                                return sid + 1;

                        shift = 16;
                        sid++;
                        q = *sid;

                        /*
                         * Check that the extra packets actually are
                         * extended self ID packets and that the
                         * sequence numbers in the extended self ID
                         * packets increase as expected.
                         */

                        if (!SELF_ID_EXTENDED(q) ||
                            seq != SELF_ID_EXT_SEQUENCE(q))
                                return NULL;

                        seq++;
                }
        }
}
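
/*
 * Extract the two-bit connection status of one port from a self-ID
 * sequence.  Ports 0-2 live in the first quadlet (port 0 at bits 7-6,
 * port 1 at bits 5-4, port 2 at bits 3-2); later ports come from the
 * extended self-ID quadlets that follow.  For example, port_index 0
 * gives index = (0 + 5) / 8 = 0 and shift = 16 - 5 * 2 = 6.
 */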
static int get_port_type(u32 *sid, int port_index)
{
        int index, shift;

        index = (port_index + 5) / 8;
        shift = 16 - ((port_index + 5) & 7) * 2;
        return (sid[index] >> shift) & 0x03;
}
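
/*
 * Allocate and initialize a node from one self-ID quadlet.  GFP_ATOMIC is
 * needed here because the topology is built under card->lock from the
 * bus-reset path, so we must not sleep.
 */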
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
        struct fw_node *node;

        node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
        if (node == NULL)
                return NULL;

        node->color = color;
        node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
        node->link_on = SELF_ID_LINK_ON(sid);
        node->phy_speed = SELF_ID_PHY_SPEED(sid);
        node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
        node->port_count = port_count;

        refcount_set(&node->ref_count, 1);
        INIT_LIST_HEAD(&node->link);

        return node;
}
/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of hops
 * to the furthest leaf.  Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case the
 * hop count is the sum of the two biggest child depths plus 2, or the
 * max hop path is entirely contained in a child tree, in which case the
 * max hop count is just the max hop count of this child.
 */
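/*
 * For example, a leaf ends up with max_depth = 0 and max_hops = 0.  A
 * node with two leaf children gets depths[] = { 0, 0 }, so max_depth = 1
 * and max_hops = max(0, 0 + 0 + 2) = 2: the two leaves are two hops
 * apart, and that path runs through this node.
 */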
static void update_hop_count(struct fw_node *node)
{
        int depths[2] = { -1, -1 };
        int max_child_hops = 0;
        int i;

        for (i = 0; i < node->port_count; i++) {
                if (node->ports[i] == NULL)
                        continue;

                if (node->ports[i]->max_hops > max_child_hops)
                        max_child_hops = node->ports[i]->max_hops;

                if (node->ports[i]->max_depth > depths[0]) {
                        depths[1] = depths[0];
                        depths[0] = node->ports[i]->max_depth;
                } else if (node->ports[i]->max_depth > depths[1])
                        depths[1] = node->ports[i]->max_depth;
        }

        node->max_depth = depths[0] + 1;
        node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}
static inline struct fw_node *fw_node(struct list_head *l)
{
        return list_entry(l, struct fw_node, link);
}
/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card, otherwise NULL.
 */
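/*
 * The self-ID sequences arrive in ascending PHY ID order, and a node's
 * children always self-identify before the node itself, with the root
 * last.  build_tree() relies on this ordering: completed child subtrees
 * sit on a stack and are popped off when their parent's sequence is
 * processed.
 */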
static struct fw_node *build_tree(struct fw_card *card,
                                  u32 *sid, int self_id_count)
{
        struct fw_node *node, *child, *local_node, *irm_node;
        struct list_head stack, *h;
        u32 *next_sid, *end, q;
        int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
        int gap_count;
        bool beta_repeaters_present;

        local_node = NULL;
        node = NULL;
        INIT_LIST_HEAD(&stack);
        stack_depth = 0;
        end = sid + self_id_count;
        phy_id = 0;
        irm_node = NULL;
        gap_count = SELF_ID_GAP_COUNT(*sid);
        beta_repeaters_present = false;

        while (sid < end) {
                next_sid = count_ports(sid, &port_count, &child_port_count);

                if (next_sid == NULL) {
                        fw_err(card, "inconsistent extended self IDs\n");
                        return NULL;
                }

                q = *sid;
                if (phy_id != SELF_ID_PHY_ID(q)) {
                        fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
                               phy_id, SELF_ID_PHY_ID(q));
                        return NULL;
                }

                if (child_port_count > stack_depth) {
                        fw_err(card, "topology stack underflow\n");
                        return NULL;
                }

                /*
                 * Seek back from the top of our stack to find the
                 * start of the child nodes for this node.
                 */
                for (i = 0, h = &stack; i < child_port_count; i++)
                        h = h->prev;
                /*
                 * When the stack is empty, this yields an invalid value,
                 * but that pointer will never be dereferenced.
                 */
                child = fw_node(h);

                node = fw_node_create(q, port_count, card->color);
                if (node == NULL) {
                        fw_err(card, "out of memory while building topology\n");
                        return NULL;
                }

                if (phy_id == (card->node_id & 0x3f))
                        local_node = node;

                if (SELF_ID_CONTENDER(q))
                        irm_node = node;

                parent_count = 0;

                for (i = 0; i < port_count; i++) {
                        switch (get_port_type(sid, i)) {
                        case SELFID_PORT_PARENT:
                                /*
                                 * Who's your daddy?  We don't know the
                                 * parent node at this time, so we
                                 * temporarily abuse node->color for
                                 * remembering the entry in the
                                 * node->ports array where the parent
                                 * node should be.  Later, when we
                                 * handle the parent node, we fix up
                                 * the reference.
                                 */
                                parent_count++;
                                node->color = i;
                                break;

                        case SELFID_PORT_CHILD:
                                node->ports[i] = child;
                                /*
                                 * Fix up parent reference for this
                                 * child node.
                                 */
                                child->ports[child->color] = node;
                                child->color = card->color;
                                child = fw_node(child->link.next);
                                break;
                        }
                }

                /*
                 * Check that the node reports exactly one parent
                 * port, except for the root, which of course should
                 * have no parents.
                 */
                if ((next_sid == end && parent_count != 0) ||
                    (next_sid < end && parent_count != 1)) {
                        fw_err(card, "parent port inconsistency for node %d: "
                               "parent_count=%d\n", phy_id, parent_count);
                        return NULL;
                }

                /* Pop the child nodes off the stack and push the new node. */
                __list_del(h->prev, &stack);
                list_add_tail(&node->link, &stack);
                stack_depth += 1 - child_port_count;

                if (node->phy_speed == SCODE_BETA &&
                    parent_count + child_port_count > 1)
                        beta_repeaters_present = true;

                /*
                 * If PHYs report different gap counts, set an invalid count
                 * which will force a gap count reconfiguration and a reset.
                 */
                if (SELF_ID_GAP_COUNT(q) != gap_count)
                        gap_count = 0;

                update_hop_count(node);

                sid = next_sid;
                phy_id++;
        }

        card->root_node = node;
        card->irm_node = irm_node;
        card->gap_count = gap_count;
        card->beta_repeaters_present = beta_repeaters_present;

        return local_node;
}
typedef void (*fw_node_callback_t)(struct fw_card *card,
                                   struct fw_node *node,
                                   struct fw_node *parent);
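
/*
 * Walk the subtree below root in breadth-first order and invoke the
 * callback for every node, passing the node's parent (NULL for the root
 * of the traversal).  Already-visited nodes are recognized by their
 * color, so the walk never crosses back up the tree.
 */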
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
                             fw_node_callback_t callback)
{
        struct list_head list;
        struct fw_node *node, *next, *child, *parent;
        int i;

        INIT_LIST_HEAD(&list);

        fw_node_get(root);
        list_add_tail(&root->link, &list);
        parent = NULL;
        list_for_each_entry(node, &list, link) {
                node->color = card->color;

                for (i = 0; i < node->port_count; i++) {
                        child = node->ports[i];
                        if (!child)
                                continue;
                        if (child->color == card->color) {
                                parent = child;
                        } else {
                                fw_node_get(child);
                                list_add_tail(&child->link, &list);
                        }
                }

                callback(card, node, parent);
        }

        list_for_each_entry_safe(node, next, &list, link)
                fw_node_put(node);
}
static void report_lost_node(struct fw_card *card,
                             struct fw_node *node, struct fw_node *parent)
{
        fw_node_event(card, node, FW_NODE_DESTROYED);
        fw_node_put(node);

        /* Topology has changed - reset bus manager retry counter */
        card->bm_retries = 0;
}
static void report_found_node(struct fw_card *card,
                              struct fw_node *node, struct fw_node *parent)
{
        int b_path = (node->phy_speed == SCODE_BETA);

        if (parent != NULL) {
                /* min() macro doesn't work here with gcc 3.4 */
                node->max_speed = parent->max_speed < node->phy_speed ?
                                        parent->max_speed : node->phy_speed;
                node->b_path = parent->b_path && b_path;
        } else {
                node->max_speed = node->phy_speed;
                node->b_path = b_path;
        }

        fw_node_event(card, node, FW_NODE_CREATED);

        /* Topology has changed - reset bus manager retry counter */
        card->bm_retries = 0;
}
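
/*
 * Tear down the whole node tree of the card: report every node below the
 * local node as destroyed and drop the references held by the tree.
 */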
void fw_destroy_nodes(struct fw_card *card)
{
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);
        card->color++;
        if (card->local_node != NULL)
                for_each_fw_node(card, card->local_node, report_lost_node);
        card->local_node = NULL;
        spin_unlock_irqrestore(&card->lock, flags);
}
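
/*
 * Graft the subtree that hangs off node1's given port onto the same port
 * of node0, and repoint the subtree's back reference from node1 to node0.
 */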
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
        struct fw_node *tree;
        int i;

        tree = node1->ports[port];
        node0->ports[port] = tree;
        for (i = 0; i < tree->port_count; i++) {
                if (tree->ports[i] == node1) {
                        tree->ports[i] = node0;
                        break;
                }
        }
}
/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
        struct list_head list0, list1;
        struct fw_node *node0, *node1, *next1;
        int i, event;

        INIT_LIST_HEAD(&list0);
        list_add_tail(&card->local_node->link, &list0);
        INIT_LIST_HEAD(&list1);
        list_add_tail(&root->link, &list1);

        node0 = fw_node(list0.next);
        node1 = fw_node(list1.next);

        while (&node0->link != &list0) {
                WARN_ON(node0->port_count != node1->port_count);

                if (node0->link_on && !node1->link_on)
                        event = FW_NODE_LINK_OFF;
                else if (!node0->link_on && node1->link_on)
                        event = FW_NODE_LINK_ON;
                else if (node1->initiated_reset && node1->link_on)
                        event = FW_NODE_INITIATED_RESET;
                else
                        event = FW_NODE_UPDATED;

                node0->node_id = node1->node_id;
                node0->color = card->color;
                node0->link_on = node1->link_on;
                node0->initiated_reset = node1->initiated_reset;
                node0->max_hops = node1->max_hops;
                node1->color = card->color;
                fw_node_event(card, node0, event);

                if (card->root_node == node1)
                        card->root_node = node0;
                if (card->irm_node == node1)
                        card->irm_node = node0;

                for (i = 0; i < node0->port_count; i++) {
                        if (node0->ports[i] && node1->ports[i]) {
                                /*
                                 * This port didn't change, queue the
                                 * connected node for further
                                 * investigation.
                                 */
                                if (node0->ports[i]->color == card->color)
                                        continue;
                                list_add_tail(&node0->ports[i]->link, &list0);
                                list_add_tail(&node1->ports[i]->link, &list1);
                        } else if (node0->ports[i]) {
                                /*
                                 * The nodes connected here were
                                 * unplugged; unref the lost nodes and
                                 * queue FW_NODE_LOST callbacks for
                                 * them.
                                 */

                                for_each_fw_node(card, node0->ports[i],
                                                 report_lost_node);
                                node0->ports[i] = NULL;
                        } else if (node1->ports[i]) {
                                /*
                                 * One or more nodes were connected to
                                 * this port.  Move the new nodes into
                                 * the tree and queue FW_NODE_CREATED
                                 * callbacks for them.
                                 */
                                move_tree(node0, node1, i);
                                for_each_fw_node(card, node0->ports[i],
                                                 report_found_node);
                        }
                }

                node0 = fw_node(node0->link.next);
                next1 = fw_node(node1->link.next);
                fw_node_put(node1);
                node1 = next1;
        }
}
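
/*
 * Rebuild the TOPOLOGY_MAP CSR contents for the card: a length word, an
 * incremented generation, the node and self-ID counts, the raw self-ID
 * quadlets, and finally the CRC computed over the whole block.
 */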
static void update_topology_map(struct fw_card *card,
                                u32 *self_ids, int self_id_count)
{
        int node_count = (card->root_node->node_id & 0x3f) + 1;
        __be32 *map = card->topology_map;

        *map++ = cpu_to_be32((self_id_count + 2) << 16);
        *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
        *map++ = cpu_to_be32((node_count << 16) | self_id_count);

        while (self_id_count--)
                *map++ = cpu_to_be32p(self_ids++);

        fw_compute_block_crc(card->topology_map);
}
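
/*
 * Entry point for the card driver after a bus reset: record the new node
 * ID and generation, kick off the bus manager work, rebuild the topology
 * tree from the self IDs and diff it against the previous one so that
 * FW_NODE_* events are emitted for every change.
 */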
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
                              int self_id_count, u32 *self_ids, bool bm_abdicate)
{
        struct fw_node *local_node;
        unsigned long flags;

        /*
         * If the selfID buffer is not the immediate successor of the
         * previously processed one, we cannot reliably compare the
         * old and new topologies.
         */
        if (!is_next_generation(generation, card->generation) &&
            card->local_node != NULL) {
                fw_destroy_nodes(card);
                card->bm_retries = 0;
        }

        spin_lock_irqsave(&card->lock, flags);

        card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
        card->node_id = node_id;
        /*
         * Update node_id before generation to prevent anybody from using
         * a stale node_id together with a current generation.
         */
        smp_wmb();
        card->generation = generation;
        card->reset_jiffies = get_jiffies_64();
        card->bm_node_id = 0xffff;
        card->bm_abdicate = bm_abdicate;
        fw_schedule_bm_work(card, 0);

        local_node = build_tree(card, self_ids, self_id_count);

        update_topology_map(card, self_ids, self_id_count);

        card->color++;

        if (local_node == NULL) {
                fw_err(card, "topology build failed\n");
                /* FIXME: We need to issue a bus reset in this case. */
        } else if (card->local_node == NULL) {
                card->local_node = local_node;
                for_each_fw_node(card, local_node, report_found_node);
        } else {
                update_tree(card, local_node);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);