/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * s1394_dev_disc.c
 *    1394 Services Layer Device Discovery Routines
 *    This file contains the bus reset thread code, bus manager routines and
 *    various routines that are used to implement remote Config ROM reading.
 *
 *    FUTURE:
 *    Rescan the bus if invalid nodes are seen.
 *    Investigate taskq for reading phase2 config rom reads.
 *    If we are reading the entire bus info blk, we should attempt
 *    a block read and fallback to quad reads if this fails.
 */
#include <sys/conf.h>
#include <sys/sysmacros.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/varargs.h>

#include <sys/tnf_probe.h>

#include <sys/1394/t1394.h>
#include <sys/1394/s1394.h>
#include <sys/1394/h1394.h>
#include <sys/1394/ieee1394.h>
#include <sys/1394/ieee1212.h>
/* hcmd_ret_t */
typedef enum {
	S1394_HCMD_INVALID,
	S1394_HCMD_NODE_DONE,
	S1394_HCMD_NODE_EXPECT_MORE,
	S1394_HCMD_LOCK_FAILED
} hcmd_ret_t;
#define	QUAD_TO_CFGROM_ADDR(b, n, q, addr) {			\
	uint64_t bl = (b);					\
	uint64_t nl = (n);					\
	addr = ((bl) << IEEE1394_ADDR_BUS_ID_SHIFT) |		\
	    ((nl) << IEEE1394_ADDR_PHY_ID_SHIFT);		\
	addr += IEEE1394_CONFIG_ROM_ADDR + ((q) << 2);		\
}
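
/*
 * Worked example (illustrative, not part of the driver): with the usual
 * IEEE 1394 layout of a 10-bit bus ID at bit 54 and a 6-bit phy ID at
 * bit 48, and the config ROM base at offset 0xFFFFF0000400 of a node's
 * address space (the values the IEEE1394_* constants are assumed to
 * carry here), QUAD_TO_CFGROM_ADDR(0x3FF, 2, 3, addr) computes
 *
 *	addr  = (0x3FFULL << 54) | (2ULL << 48)		= 0xFFC2000000000000
 *	addr += 0xFFFFF0000400ULL + (3 << 2)		= 0xFFC2FFFFF000040C
 *
 * i.e. the address of quadlet 3 of node 2's config ROM on the local bus.
 */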
#define	CFGROM_READ_PAUSE(d)					\
	((s1394_cfgrom_read_delay_ms == 0) ? (void) 0 :		\
	ddi_msleep((d)))

#define	BUMP_CFGROM_READ_DELAY(n)				\
	(n)->cfgrom_read_delay += s1394_cfgrom_read_delay_incr

#define	CFGROM_GET_READ_DELAY(n, d)				\
	((d) = (n)->cfgrom_read_delay)
#define	SETUP_QUAD_READ(n, reset_fails, quadlet, cnt)		\
{								\
	int i = (reset_fails);					\
	if (i != 0) {						\
		(n)->cfgrom_read_fails = 0;			\
		(n)->cfgrom_read_delay = (uchar_t)s1394_cfgrom_read_delay_ms; \
	}							\
	(n)->cfgrom_quad_to_read = (quadlet);			\
	(n)->cfgrom_quad_read_cnt = (cnt);			\
}
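
/*
 * The pacing macros above work together; a minimal, illustrative sketch of
 * how the command handlers below use them for one quadlet read (error
 * handling elided, see the real call sites in the phase1/phase2 handlers):
 *
 *	CFGROM_GET_READ_DELAY(node, readdelay);
 *	SETUP_QUAD_READ(node, 1, quadlet, 1);	-- also resets fail count/delay
 *	s1394_unlock_tree(hal);
 *	CFGROM_READ_PAUSE(readdelay);
 *	(void) s1394_lock_tree(hal);
 *	(void) s1394_read_config_quadlet(hal, cmd, &status);
 */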
static void s1394_wait_for_events(s1394_hal_t *hal, int firsttime);

static int s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
    hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd));

static void s1394_flush_cmplq(s1394_hal_t *hal);

static void s1394_br_thread_exit(s1394_hal_t *hal);

static void s1394_target_bus_reset_notifies(s1394_hal_t *hal,
    t1394_localinfo_t *localinfo);

static int s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
    s1394_status_t *status);

static int s1394_cfgrom_scan_phase1(s1394_hal_t *hal);

static hcmd_ret_t s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal,
    cmd1394_cmd_t *cmd);

static int s1394_cfgrom_scan_phase2(s1394_hal_t *hal);

static hcmd_ret_t s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal,
    cmd1394_cmd_t *cmd);

static int s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
    s1394_status_t *status);

static void s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd);

static void s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num,
    uint32_t *quadlet, uint32_t *data);

static int s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode);

static int s1394_match_all_GUIDs(s1394_hal_t *hal);

static void s1394_become_bus_mgr(void *arg);

static void s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd);

static int s1394_bus_mgr_processing(s1394_hal_t *hal);

static int s1394_do_bus_mgr_processing(s1394_hal_t *hal);

static void s1394_bus_mgr_timers_stop(s1394_hal_t *hal,
    timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);

static void s1394_bus_mgr_timers_start(s1394_hal_t *hal,
    timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);

static int s1394_cycle_master_capable(s1394_hal_t *hal);

static int s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root,
    int new_gap_cnt, uint32_t IRM_flags);

static void s1394_phy_config_callback(cmd1394_cmd_t *cmd);

static int s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node,
    uint32_t quadlet, uint32_t *nextquadp);
static int s1394_cfgrom_read_retry_cnt = 3;	/* 1 + 3 retries */
static int s1394_cfgrom_read_delay_ms = 20;	/* start with 20ms */
static int s1394_cfgrom_read_delay_incr = 10;	/* 10ms increments */
static int s1394_enable_crc_validation = 0;
static int s1394_turn_off_dir_stack = 0;
static int s1394_crcsz_is_cfgsz = 0;
static int s1394_enable_rio_pass1_workarounds = 0;
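
/*
 * The variables above are tunables, not interfaces. As a hedged,
 * illustrative example only: assuming this file is built into the "s1394"
 * services-layer misc module, CRC validation could be enabled with an
 * /etc/system line such as
 *
 *	set s1394:s1394_enable_crc_validation = 1
 *
 * followed by a reboot (or by patching the variable on a live system with
 * mdb -kw).
 */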
/*
 * s1394_br_thread()
 *    is the bus reset thread. Its sole purpose is to read/reread config roms
 *    as appropriate and do bus reset time things (bus manager processing,
 *    isoch resource reallocation etc.).
 */
void
s1394_br_thread(s1394_hal_t *hal)
{
	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	/* Initialize the Bus Mgr timers */
	hal->bus_mgr_timeout_id = 0;
	hal->bus_mgr_query_timeout_id = 0;

	/* Initialize the completion Q */
	mutex_enter(&hal->br_cmplq_mutex);
	hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
	mutex_exit(&hal->br_cmplq_mutex);

	s1394_wait_for_events(hal, 1);

	for (;;) {
		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

		s1394_wait_for_events(hal, 0);

		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

		/* stop bus manager timeouts, if needed */
		s1394_bus_mgr_timers_stop(hal, &hal->bus_mgr_query_timeout_id,
		    &hal->bus_mgr_timeout_id);

		s1394_flush_cmplq(hal);

		/* start timers for checking bus manager, if needed */
		s1394_bus_mgr_timers_start(hal, &hal->bus_mgr_query_timeout_id,
		    &hal->bus_mgr_timeout_id);

		/* Try to reallocate all isoch resources */
		s1394_isoch_rsrc_realloc(hal);

		if (s1394_cfgrom_scan_phase1(hal) != DDI_SUCCESS) {
			continue;
		}

		if (s1394_bus_mgr_processing(hal) != DDI_SUCCESS) {
			continue;
		}

		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

		if (s1394_cfgrom_scan_phase2(hal) != DDI_SUCCESS) {
			continue;
		}

		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
	}
}
/*
 * s1394_wait_for_events()
 *    blocks waiting for a cv_signal on the bus reset condition variable.
 *    Used by the bus reset thread for synchronizing with the bus reset/
 *    self id interrupt callback from the hal. Does CPR initialization the
 *    first time it is called. If the services layer sees a valid self id
 *    buffer, it builds the topology tree and signals the bus reset thread
 *    to read the config roms as appropriate (indicated by BR_THR_CFGROM_SCAN).
 *    If the services layer wishes to kill the bus reset thread, it signals
 *    this by signaling a BR_THR_GO_AWAY event.
 */
static void
s1394_wait_for_events(s1394_hal_t *hal, int firsttime)
{
	uint_t event;

	ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	if (firsttime)
		CALLB_CPR_INIT(&hal->hal_cprinfo, &hal->br_thread_mutex,
		    callb_generic_cpr, "s1394_br_thread");

	/* Check and wait for a BUS RESET */
	mutex_enter(&hal->br_thread_mutex);
	while ((event = hal->br_thread_ev_type) == 0) {
		CALLB_CPR_SAFE_BEGIN(&hal->hal_cprinfo);
		cv_wait(&hal->br_thread_cv, &hal->br_thread_mutex);
		CALLB_CPR_SAFE_END(&hal->hal_cprinfo, &hal->br_thread_mutex);
	}

	if (event & BR_THR_GO_AWAY) {
		s1394_br_thread_exit(hal);
		/*NOTREACHED*/
		return;
	}

	if (firsttime) {
		mutex_exit(&hal->br_thread_mutex);
		return;
	}

	mutex_enter(&hal->topology_tree_mutex);
	if (event & BR_THR_CFGROM_SCAN) {
		hal->br_cfgrom_read_gen = hal->generation_count;
	}
	hal->br_thread_ev_type &= ~BR_THR_CFGROM_SCAN;
	mutex_exit(&hal->topology_tree_mutex);
	mutex_exit(&hal->br_thread_mutex);
}
/*
 * s1394_wait_for_cfgrom_callbacks()
 *    Waits for completed config rom reads. Takes each completion off the
 *    completion queue and passes it to the "completion handler" function
 *    that was passed in as an argument. Further processing of the completion
 *    queue depends on the return status of the completion handler. If there
 *    is a bus reset while waiting for completions or if the services layer
 *    signals BR_THR_GO_AWAY, quits waiting for completions and returns
 *    non-zero. Also returns non-zero if the completion handler returns
 *    S1394_HCMD_LOCK_FAILED. Returns 0 if config roms for all nodes have
 *    been dealt with.
 */
static int
s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
    hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd))
{
	cmd1394_cmd_t *cmd;
	s1394_cmd_priv_t *s_priv;
	int ret, done = 0;
	hcmd_ret_t cmdret;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	ret = DDI_SUCCESS;

	while (!done) {
		mutex_enter(&hal->br_cmplq_mutex);
		mutex_enter(&hal->topology_tree_mutex);
		while (wait_gen == hal->generation_count &&
		    (hal->br_thread_ev_type & BR_THR_GO_AWAY) == 0 &&
		    hal->br_cmplq_head == NULL) {
			mutex_exit(&hal->topology_tree_mutex);
			cv_wait(&hal->br_cmplq_cv, &hal->br_cmplq_mutex);
			mutex_enter(&hal->topology_tree_mutex);
		}
		ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
		if (wait_gen != hal->generation_count ||
		    (hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
#if !defined(NPROBE) && defined(TNF_DEBUG)
			int hal_gen = hal->generation_count;
#endif

			mutex_exit(&hal->topology_tree_mutex);
			mutex_exit(&hal->br_cmplq_mutex);
			s1394_flush_cmplq(hal);
			return (DDI_FAILURE);
		}
		mutex_exit(&hal->topology_tree_mutex);

		if ((cmd = hal->br_cmplq_head) != NULL) {
			s_priv = S1394_GET_CMD_PRIV(cmd);

			hal->br_cmplq_head = s_priv->cmd_priv_next;
		}
		if (cmd == hal->br_cmplq_tail)
			hal->br_cmplq_tail = NULL;
		mutex_exit(&hal->br_cmplq_mutex);

		if (cmd != NULL) {
			if (cmd->bus_generation != wait_gen) {
				(void) s1394_free_cmd(hal, &cmd);
				continue;
			}
			cmdret = (*handle_cmd_fn)(hal, cmd);
			ASSERT(cmdret != S1394_HCMD_INVALID);
			if (cmdret == S1394_HCMD_LOCK_FAILED) {
				/* flush completion queue */
				ret = DDI_FAILURE;
				s1394_flush_cmplq(hal);
				break;
			} else if (cmdret == S1394_HCMD_NODE_DONE) {
				if (--hal->cfgroms_being_read == 0) {
					/* All done */
					break;
				}
			} else {
				ASSERT(cmdret == S1394_HCMD_NODE_EXPECT_MORE);
				done = 0;
			}
		}
	}

	return (ret);
}
/*
 * s1394_flush_cmplq()
 *    Frees all cmds on the completion queue.
 */
static void
s1394_flush_cmplq(s1394_hal_t *hal)
{
	s1394_cmd_priv_t *s_priv;
	cmd1394_cmd_t *cmd, *tcmd;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	cmd = NULL;

	do {
		mutex_enter(&hal->br_cmplq_mutex);
		cmd = hal->br_cmplq_head;
		hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
		mutex_exit(&hal->br_cmplq_mutex);

		while (cmd != NULL) {
			s_priv = S1394_GET_CMD_PRIV(cmd);

			tcmd = s_priv->cmd_priv_next;
			(void) s1394_free_cmd(hal, &cmd);
			cmd = tcmd;
		}

		mutex_enter(&hal->br_cmplq_mutex);
		cmd = hal->br_cmplq_head;
		mutex_exit(&hal->br_cmplq_mutex);

	} while (cmd != NULL);
}
/*
 * s1394_br_thread_exit()
 *    Flushes the completion queue and calls thread_exit() (which effectively
 *    kills the bus reset thread).
 */
static void
s1394_br_thread_exit(s1394_hal_t *hal)
{
	ASSERT(MUTEX_HELD(&hal->br_thread_mutex));
	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
	s1394_flush_cmplq(hal);
#ifndef	__lock_lint
	CALLB_CPR_EXIT(&hal->hal_cprinfo);
#endif
	hal->br_thread_ev_type &= ~BR_THR_GO_AWAY;
	thread_exit();
}
/*
 * s1394_target_bus_reset_notifies()
 *    tells the ndi event framework to invoke any callbacks registered for
 *    "bus reset event".
 */
static void
s1394_target_bus_reset_notifies(s1394_hal_t *hal, t1394_localinfo_t *localinfo)
{
	ddi_eventcookie_t cookie;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, NULL,
	    DDI_DEVI_BUS_RESET_EVENT, &cookie, NDI_EVENT_NOPASS) ==
	    NDI_SUCCESS) {
		(void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, NULL,
		    cookie, localinfo);
	}
}
/*
 * s1394_alloc_cfgrom()
 *    Allocates config rom for the node. Sets the CFGROM_NEW_ALLOC bit in the
 *    node cfgrom state. Drops topology_tree_mutex around the call to
 *    kmem_zalloc(). If re-locking fails, returns DDI_FAILURE, else returns
 *    DDI_SUCCESS.
 */
static int
s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node, s1394_status_t *status)
{
	uint32_t *cfgrom;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	*status = S1394_NOSTATUS;

	/*
	 * if cfgrom is non-NULL, this has to be the generation changed
	 * case (where we allocate cfgrom again to reread the cfgrom)
	 */
	ASSERT(node->cfgrom == NULL || (node->cfgrom != NULL &&
	    CFGROM_GEN_CHANGED(node) == B_TRUE));

	/*
	 * if node matched, either cfgrom has to be NULL or link should be
	 * off in the last matched node or config rom generations changed.
	 */
	ASSERT(NODE_MATCHED(node) == B_FALSE || (NODE_MATCHED(node) == B_TRUE &&
	    (node->cfgrom == NULL || LINK_ACTIVE(node->old_node) == B_FALSE) ||
	    CFGROM_GEN_CHANGED(node) == B_TRUE));

	s1394_unlock_tree(hal);
	cfgrom = kmem_zalloc(IEEE1394_CONFIG_ROM_SZ, KM_SLEEP);
	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		kmem_free(cfgrom, IEEE1394_CONFIG_ROM_SZ);
		*status |= S1394_LOCK_FAILED;
		return (DDI_FAILURE);
	}
	node->cfgrom = cfgrom;
	node->cfgrom_size = IEEE1394_CONFIG_ROM_QUAD_SZ;
	SET_CFGROM_NEW_ALLOC(node);
	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	return (DDI_SUCCESS);
}
/*
 * s1394_free_cfgrom()
 *    Marks the config rom invalid and frees up the config rom based on
 *    options.
 */
void
s1394_free_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
    s1394_free_cfgrom_t options)
{
	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	ASSERT(node->cfgrom != NULL);

	if (options == S1394_FREE_CFGROM_BOTH) {
		/*
		 * free in both old and new trees; will be called with
		 * new node.
		 */
		s1394_node_t *onode = node->old_node;

		if (NODE_MATCHED(node) == B_TRUE && onode->cfgrom != NULL)
			ASSERT(onode->cfgrom == node->cfgrom);

		if (onode != NULL && onode->cfgrom != NULL && onode->cfgrom !=
		    node->cfgrom)
			kmem_free(onode->cfgrom, IEEE1394_CONFIG_ROM_SZ);

		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
		onode->cfgrom = NULL;
		node->cfgrom = NULL;

		CLEAR_CFGROM_STATE(onode);
		CLEAR_CFGROM_STATE(node);

	} else if (options == S1394_FREE_CFGROM_NEW) {

		ASSERT(CFGROM_NEW_ALLOC(node) == B_TRUE);
		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
		CLEAR_CFGROM_NEW_ALLOC(node);
		node->cfgrom = NULL;
		CLEAR_CFGROM_STATE(node);

	} else if (options == S1394_FREE_CFGROM_OLD) {

		/* freeing in old tree */
		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
		node->cfgrom = NULL;
		CLEAR_CFGROM_STATE(node);
	}
}
/*
 * s1394_copy_cfgrom()
 *    Copies config rom info from "from" node to "to" node. Clears the
 *    CFGROM_NEW_ALLOC bit in the cfgrom state of both nodes. (CFGROM_NEW_ALLOC
 *    acts as a reference count. If set, only the node in the current tree
 *    has a pointer to it; if clear, both the node in the current tree as
 *    well as the corresponding node in the old tree point to the same memory).
 */
void
s1394_copy_cfgrom(s1394_node_t *to, s1394_node_t *from)
{
	ASSERT(to->cfgrom == NULL);

	to->cfgrom = from->cfgrom;
	to->cfgrom_state = from->cfgrom_state;
	to->cfgrom_valid_size = from->cfgrom_valid_size;
	to->cfgrom_size = from->cfgrom_size;
	to->node_state = from->node_state;

	bcopy(from->dir_stack, to->dir_stack,
	    offsetof(s1394_node_t, cfgrom_quad_to_read) -
	    offsetof(s1394_node_t, dir_stack));

	to->cfgrom_quad_to_read = from->cfgrom_quad_to_read;

	CLEAR_CFGROM_NEW_ALLOC(to);
	CLEAR_CFGROM_NEW_ALLOC(from);

	/*
	 * old link off, new link on => handled in s1394_cfgrom_scan_phase1
	 * old link on, new link off => handled in s1394_process_old_tree
	 */
	if (LINK_ACTIVE(from) == B_FALSE) {
		/*
		 * if last time around, link was off, there wouldn't
		 * have been config rom allocated.
		 */
		ASSERT(from->cfgrom == NULL);
		return;
	} else {
		s1394_selfid_pkt_t *selfid_pkt = to->selfid_packet;

		if (IEEE1394_SELFID_ISLINKON(selfid_pkt))
			SET_LINK_ACTIVE(to);
	}
}
/*
 * s1394_read_bus_info_blk()
 *    Attempts to kick off reading the IEEE1212_NODE_CAP_QUAD quad or quad 0.
 *    Increments cfgroms_being_read by 1. Returns DDI_SUCCESS if the command
 *    was issued, else sets status to the failure reason and returns
 *    DDI_FAILURE.
 */
static int
s1394_read_bus_info_blk(s1394_hal_t *hal, s1394_node_t *node,
    s1394_status_t *status)
{
	uint32_t quadlet;
	cmd1394_cmd_t *cmd;
	uchar_t node_num;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	ASSERT(LINK_ACTIVE(node) == B_TRUE);

	node_num = node->node_num;

	/*
	 * drop the topology lock around command allocation. Return failure
	 * if either command allocation fails or we cannot reacquire the lock.
	 */
	s1394_unlock_tree(hal);
	*status = S1394_NOSTATUS;

	if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
		*status |= S1394_CMD_ALLOC_FAILED;
	}
	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		*status |= S1394_LOCK_FAILED;
		/* free the cmd if we allocated one above */
		if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
	}
	if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
		return (DDI_FAILURE);
	}

	/* allocate cfgrom if needed */
	if (node->cfgrom == NULL && s1394_alloc_cfgrom(hal, node, status) !=
	    DDI_SUCCESS) {
		ASSERT(((*status) & S1394_LOCK_FAILED) != 0);
		(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
		return (DDI_FAILURE);
	}

	/*
	 * if this is a matched node, read quad 2 (node capabilities) to
	 * see if the generation count changed.
	 */
	quadlet = CFGROM_BIB_READ(node) ? IEEE1212_NODE_CAP_QUAD : 0;

	/*
	 * read bus info block at 100Mbit. This will help us with the cases
	 * where LINK is slower than PHY; s1394 uses PHY speed till speed map
	 * is updated.
	 */
	cmd->completion_callback = s1394_cfgrom_read_callback;
	cmd->bus_generation = hal->generation_count;
	cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
	    CMD1394_OVERRIDE_ADDR | CMD1394_OVERRIDE_SPEED);
	cmd->cmd_speed = IEEE1394_S100;
	cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;

	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
	    quadlet, cmd->cmd_addr);

	SETUP_QUAD_READ(node, 1, quadlet, 1);
	if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
		/* free the command if it wasn't handed over to the HAL */
		if (((*status) & S1394_CMD_INFLIGHT) == 0) {
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
		}
		if (((*status) & S1394_LOCK_FAILED) != 0) {
			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
		}
		return (DDI_FAILURE);
	}

	hal->cfgroms_being_read++;
	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	return (DDI_SUCCESS);
}
/*
 * s1394_read_rest_of_cfgrom()
 *    Attempts to start reading the node->cfgrom_quad_to_read quadlet.
 *    Increments cfgroms_being_read by 1 and returns DDI_SUCCESS if the command
 *    was issued, else sets status to the failure reason and returns
 *    DDI_FAILURE.
 */
int
s1394_read_rest_of_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
    s1394_status_t *status)
{
	cmd1394_cmd_t *cmd;
	uchar_t node_num = node->node_num;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	ASSERT(LINK_ACTIVE(node) == B_TRUE);

	/*
	 * drop the topology lock around command allocation. Return failure
	 * if either command allocation fails or we cannot reacquire the lock.
	 */
	s1394_unlock_tree(hal);
	*status = S1394_NOSTATUS;

	if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
		*status |= S1394_CMD_ALLOC_FAILED;
	}
	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		*status |= S1394_LOCK_FAILED;
		/* free if we allocated a cmd above */
		if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
	}
	if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
		return (DDI_FAILURE);
	}

	cmd->completion_callback = s1394_cfgrom_read_callback;
	cmd->bus_generation = hal->generation_count;
	cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
	    CMD1394_OVERRIDE_ADDR);
	cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;

	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
	    node->cfgrom_quad_to_read, cmd->cmd_addr);
	SETUP_QUAD_READ(node, 1, node->cfgrom_quad_to_read, 1);
	if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
		/* free the command if it wasn't handed over to the HAL */
		if (((*status) & S1394_CMD_INFLIGHT) == 0) {
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
		}
		if (((*status) & S1394_LOCK_FAILED) != 0) {
			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
		}
		return (DDI_FAILURE);
	}

	hal->cfgroms_being_read++;
	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	return (DDI_SUCCESS);
}
/*
 * s1394_cfgrom_scan_phase1()
 *    Attempts to read bus info blocks for nodes as needed. Returns DDI_FAILURE
 *    if bus reset generations changed (as indicated by s1394_lock_tree()
 *    return status) or if any of the callees return failure, else returns
 *    DDI_SUCCESS.
 */
static int
s1394_cfgrom_scan_phase1(s1394_hal_t *hal)
{
	uint32_t number_of_nodes;
	int ret;
	int node;
	int wait_in_gen;
	int wait_for_cbs;
	uint_t hal_node_num;
	uint_t hal_node_num_old;
	s1394_node_t *nnode, *onode;
	s1394_selfid_pkt_t *selfid_pkt;
	s1394_status_t status;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	wait_for_cbs = 0;
	number_of_nodes = hal->number_of_nodes;
	hal->cfgroms_being_read = 0;
	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
	hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
	s1394_unlock_tree(hal);

	ret = DDI_SUCCESS;

	/* Send requests for all new node config ROM 0 */
	for (node = 0; node < number_of_nodes; node++) {

		status = S1394_UNKNOWN;

		if (s1394_lock_tree(hal) != DDI_SUCCESS) {
			status = S1394_LOCK_FAILED;
			break;
		}

		nnode = &hal->topology_tree[node];
		onode = nnode->old_node;
		/* if node matched, onode should be non NULL */
		ASSERT(NODE_MATCHED(nnode) == B_FALSE || (NODE_MATCHED(nnode) ==
		    B_TRUE && onode != NULL));

		/*
		 * Read bus info block if it is a brand new node (MATCHED is 0)
		 * or if matched but link was off in previous generations
		 * or if matched but had invalid cfgrom in last generation
		 * or if matched but config rom generation > 1 (this is to
		 * check if config rom generation changed between bus resets).
		 */
		if ((node != hal_node_num) &&
		    ((NODE_MATCHED(nnode) == B_FALSE) ||
		    (NODE_MATCHED(nnode) == B_TRUE && LINK_ACTIVE(onode) ==
		    B_FALSE) || (NODE_MATCHED(nnode) == B_TRUE &&
		    (onode->cfgrom == NULL || CFGROM_VALID(onode) ==
		    B_FALSE)) || (NODE_MATCHED(nnode) == B_TRUE &&
		    nnode->cfgrom != NULL && CONFIG_ROM_GEN(nnode->cfgrom) >
		    1))) {

			SET_NODE_VISITED(nnode);
			selfid_pkt = nnode->selfid_packet;
			if (IEEE1394_SELFID_ISLINKON(selfid_pkt)) {

				SET_LINK_ACTIVE(nnode);

				status = S1394_UNKNOWN;

				if (s1394_read_bus_info_blk(hal, nnode,
				    &status) != DDI_SUCCESS) {
					if ((status & S1394_LOCK_FAILED) != 0)
						break;
				} else {
					wait_for_cbs++;
					wait_in_gen = hal->br_cfgrom_read_gen;
				}
			} else {
				/*
				 * Special case: if link was active last
				 * time around, this should be treated as
				 * the node going away.
				 */
				CLEAR_LINK_ACTIVE(nnode);
				if (NODE_MATCHED(nnode) == B_TRUE &&
				    LINK_ACTIVE(onode) == B_TRUE) {
					CLEAR_CFGROM_STATE(nnode);
				}
			}
		} else {
			if (node == hal_node_num) {
				onode = &hal->old_tree[hal_node_num_old];
				/* Set up the local matched nodes */
				if (onode) {
					nnode->old_node = onode;
					SET_NODE_MATCHED(nnode);
					SET_NODE_MATCHED(onode);
					s1394_copy_cfgrom(nnode, onode);
				}
			}
		}
		s1394_unlock_tree(hal);
	}

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	if ((status & S1394_LOCK_FAILED) != 0) {
		return (DDI_FAILURE);
	}

	/*
	 * If we started any reads, wait for completion callbacks
	 */
	if (wait_for_cbs != 0) {
		ret = s1394_wait_for_cfgrom_callbacks(hal, wait_in_gen,
		    s1394_br_thread_handle_cmd_phase1);
	}

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	return (ret);
}
/*
 * s1394_br_thread_handle_cmd_phase1()
 *    Process the cmd completion for phase 1 config rom reads. If we
 *    successfully read the IEEE1212_NODE_CAP_QUAD quadlet and the config rom
 *    generation did not change, move targets hanging off the old node to the
 *    current node. If config rom generations change, alloc a new config rom
 *    and start re-reading the new config rom. If all of the bus info block is
 *    read (as required), mark the node as CFGROM_BIB_READ. If a config rom
 *    read fails, retry if there haven't been too many failures. The topology
 *    tree mutex is dropped and reacquired in this routine. If reacquiring
 *    fails, returns S1394_HCMD_LOCK_FAILED. If the entire bus info block is
 *    read, returns S1394_HCMD_NODE_DONE, else returns
 *    S1394_HCMD_NODE_EXPECT_MORE (to indicate not done with the node yet).
 *
 *    If we cannot read any of the quadlets in the bus info block, cfgrom
 *    is marked invalid in this generation (a side effect of calling
 *    s1394_free_cfgrom()). We free cfgrom in this routine only if the failure
 *    is not due to bus generations changing.
 */
static hcmd_ret_t
s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
{
	s1394_target_t *t;
	s1394_node_t *node, *onode;
	uint32_t node_num, quadlet, data;
	int freecmd, done, locked;
	hcmd_ret_t cmdret;
	uchar_t readdelay;
	s1394_status_t status;

	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
	ASSERT(quadlet == 0 || quadlet < IEEE1394_BIB_QUAD_SZ);

	cmdret = S1394_HCMD_NODE_EXPECT_MORE;

	locked = 1;
	freecmd = 1;

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		locked = 0;
		goto bail;
	}

	node = &hal->topology_tree[node_num];

	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {

		int reread = 0;

		done = 0;

		if (quadlet == IEEE1212_NODE_CAP_QUAD &&
		    CFGROM_BIB_READ(node)) {

			int cur_gen = ((data & IEEE1394_BIB_GEN_MASK) >>
			    IEEE1394_BIB_GEN_SHIFT);

			/*
			 * node->old_node can be NULL if this is a new node &
			 * we are doing a rescan
			 */
			onode = node->old_node;
			if (CONFIG_ROM_GEN(node->cfgrom) == cur_gen) {

				if (CFGROM_PARSED(node) == B_TRUE) {
					rw_enter(&hal->target_list_rwlock,
					    RW_WRITER);
					/* Update the target list, if any */
					if (onode != NULL &&
					    (t = onode->target_list) != NULL) {
						node->target_list = t;
						while (t != NULL) {
							t->on_node = node;
							t = t->target_sibling;
						}
					}
					rw_exit(&hal->target_list_rwlock);
				}
				SET_NODE_MATCHED(node);
				if (onode)
					SET_NODE_MATCHED(onode);
				node->cfgrom_quad_to_read =
				    IEEE1394_BIB_QUAD_SZ;
				done++;
			} else {

				SET_CFGROM_GEN_CHANGED(node);
				if (onode != NULL)
					SET_CFGROM_GEN_CHANGED(onode);
				/*
				 * Reset BIB_READ flag and start reading entire
				 * config rom.
				 */
				CLEAR_CFGROM_BIB_READ(node);
				reread = 1;

				/*
				 * if generations changed, allocate cfgrom for
				 * the new generation. s1394_match_GUID() will
				 * free up the cfgrom from the old generation.
				 */
				if (s1394_alloc_cfgrom(hal, node, &status) !=
				    DDI_SUCCESS) {
					ASSERT((status & S1394_LOCK_FAILED) !=
					    0);
					ASSERT(MUTEX_NOT_HELD(&hal->
					    topology_tree_mutex));
					locked = 0;
					/* we failed to relock the tree */
					goto bail;
				}
			}
		}

		/*
		 * we end up here if we don't have the bus_info_blk for this
		 * node or if the config rom generation changed.
		 */

		/*
		 * Pass1 Rio bug workaround. Due to this bug, if we read
		 * past quadlet 5 of the config rom, the PCI bus gets wedged.
		 * Avoid the hang by not reading past quadlet 5.
		 * We identify a remote Rio by the node vendor id part of
		 * quad 3 (which is == SUNW == S1394_SUNW_OUI (0x80020)).
		 */
		if (s1394_enable_rio_pass1_workarounds != 0) {
			if ((quadlet == 3) && ((data >> 8) == S1394_SUNW_OUI)) {
				node->cfgrom_size = IEEE1394_BIB_QUAD_SZ;
				node->cfgrom_valid_size = IEEE1394_BIB_QUAD_SZ;
			}
		}

		if (!done) {

			if (reread)
				quadlet = 0;
			else
				node->cfgrom[quadlet++] = data;

			/* if we don't have the entire bus_info_blk... */
			if (quadlet < IEEE1394_BIB_QUAD_SZ) {

				CFGROM_GET_READ_DELAY(node, readdelay);
				SETUP_QUAD_READ(node, 1, quadlet, 1);
				s1394_unlock_tree(hal);
				CFGROM_READ_PAUSE(readdelay);
				/* get next quadlet */
				if (s1394_lock_tree(hal) != DDI_SUCCESS) {
					locked = 0;
				} else if (s1394_read_config_quadlet(hal, cmd,
				    &status) != DDI_SUCCESS) {
					/*
					 * Failed to get going. If command was
					 * successfully handed over to the HAL,
					 * don't free it (it will get freed
					 * later in the callback).
					 */
					if ((status & S1394_CMD_INFLIGHT) !=
					    0) {
						freecmd = 0;
					}
					if ((status & S1394_LOCK_FAILED) != 0) {
						locked = 0;
					} else {
						if (CFGROM_NEW_ALLOC(node) ==
						    B_TRUE) {
							s1394_free_cfgrom(hal,
							    node,
							    S1394_FREE_CFGROM_NEW);
						} else {
							CLEAR_CFGROM_STATE(
							    node);
						}
					}
					done++;
				} else {
					freecmd = 0;
				}
			} else {
				/* got all of bus_info_blk */
				SET_CFGROM_BIB_READ(node);
				if (node->cfgrom_size == IEEE1394_BIB_QUAD_SZ)
					SET_CFGROM_ALL_READ(node);
				node->cfgrom_quad_to_read = quadlet;
				done++;
			}
		}
	} else {
		done = 1;
		node->cfgrom_read_fails++;
		BUMP_CFGROM_READ_DELAY(node);

		/* retry if not too many failures */
		if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
			CFGROM_GET_READ_DELAY(node, readdelay);
			SETUP_QUAD_READ(node, 0, quadlet, 1);
			s1394_unlock_tree(hal);
			CFGROM_READ_PAUSE(readdelay);
			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
				locked = 0;
			} else if (s1394_read_config_quadlet(hal, cmd,
			    &status) != DDI_SUCCESS) {
				/*
				 * Failed to get going. If command was
				 * successfully handed over to the HAL,
				 * don't free it (it will get freed
				 * later in the callback).
				 */
				if ((status & S1394_CMD_INFLIGHT) != 0) {
					freecmd = 0;
				}
				if ((status & S1394_LOCK_FAILED) != 0) {
					locked = 0;
				} else {
					if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
						s1394_free_cfgrom(hal, node,
						    S1394_FREE_CFGROM_NEW);
					} else {
						CLEAR_CFGROM_STATE(node);
					}
				}
			} else {
				done = 0;
				freecmd = 0;
			}
		} else {
			if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
				s1394_free_cfgrom(hal, node,
				    S1394_FREE_CFGROM_NEW);
			} else {
				CLEAR_CFGROM_STATE(node);
			}
		}
	}
bail:
	if (freecmd) {
		(void) s1394_free_cmd(hal, &cmd);
	}

	if (done) {
		cmdret = S1394_HCMD_NODE_DONE;
	}

	/* if we are bailing out because locking failed, locked == 0 */
	if (locked == 0)
		cmdret = S1394_HCMD_LOCK_FAILED;
	else
		s1394_unlock_tree(hal);

	return (cmdret);
}
/*
 * s1394_cfgrom_scan_phase2()
 *    Handles phase 2 of bus reset processing. Matches GUIDs between old
 *    and new topology trees to identify which node moved where. Processes
 *    the old topology tree (involves offlining any nodes that got unplugged
 *    between the last generation and the current generation). Updates the
 *    speed map, sets up physical AR request filters and does isoch resource
 *    realloc failure notification and bus reset notifications. Then resends
 *    any commands that were issued by targets while the reset was being
 *    processed. Finally, the current topology tree is processed. This involves
 *    reading config rom past the bus info block for new nodes and parsing
 *    the config rom, creating a devinfo for each unit directory found in the
 *    config rom.
 *    Returns DDI_FAILURE if there was a bus reset during any of the function
 *    calls (as indicated by lock failures) or if any of the callees
 *    return failure, else returns DDI_SUCCESS.
 */
static int
s1394_cfgrom_scan_phase2(s1394_hal_t *hal)
{
	int ret;
	uint_t wait_gen;
	int wait_for_cbs = 0;
	t1394_localinfo_t localinfo;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	if (s1394_match_all_GUIDs(hal) == DDI_SUCCESS) {
		s1394_unlock_tree(hal);
	}

	if (s1394_process_old_tree(hal) != DDI_SUCCESS) {
		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
		return (DDI_FAILURE);
	}

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	s1394_update_speed_map_link_speeds(hal);
	s1394_unlock_tree(hal);

	/* Setup physical AR request filters */
	s1394_physical_arreq_setup_all(hal);

	/* Notify targets of isoch resource realloc failures */
	s1394_isoch_rsrc_realloc_notify(hal);

	/* Notify targets of the end of bus reset processing */
	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	localinfo.bus_generation = hal->generation_count;
	localinfo.local_nodeID = hal->node_id;

	s1394_unlock_tree(hal);
	s1394_target_bus_reset_notifies(hal, &localinfo);
	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Set HAL state to normal */
	if (hal->disable_requests_bit == 0)
		hal->hal_state = S1394_HAL_NORMAL;
	else
		hal->hal_state = S1394_HAL_DREQ;

	s1394_unlock_tree(hal);

	/* Flush the pending Q */
	s1394_resend_pending_cmds(hal);

	if (s1394_process_topology_tree(hal, &wait_for_cbs, &wait_gen)) {
		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
		return (DDI_FAILURE);
	}

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	s1394_print_node_info(hal);

	s1394_unlock_tree(hal);

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	ret = DDI_SUCCESS;

	/*
	 * If we started any reads, wait for completion callbacks
	 */
	if (wait_for_cbs != 0) {
		ret = s1394_wait_for_cfgrom_callbacks(hal, wait_gen,
		    s1394_br_thread_handle_cmd_phase2);
	}

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	return (ret);
}
/*
 * s1394_br_thread_handle_cmd_phase2()
 *    Process the cmd completion for phase 2 config rom reads. If all the
 *    needed quads are read, validates the config rom; if the config rom is
 *    invalid (crc failures), frees the config rom, else marks the config rom
 *    valid and calls s1394_update_devinfo_tree() to parse the config rom.
 *    If more quadlets are needed, attempts to kick off the read and returns
 *    S1394_HCMD_NODE_EXPECT_MORE if the read was successfully started. If a
 *    bus reset is seen while in this routine, returns S1394_HCMD_LOCK_FAILED.
 *    If done with the node (with or without crc errors), returns
 *    S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
 *    indicate not done with the node yet).
 */
static hcmd_ret_t
s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
{
	s1394_node_t *node;
	uint32_t node_num, quadlet, data;
	int update_devinfo, locked, freecmd, done;
	hcmd_ret_t cmdret;
	uchar_t readdelay;
	s1394_status_t status;

	/*
	 * we end up here if this is a brand new node or if it is a known node
	 * but the config ROM changed (and triggered a re-read).
	 */
	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
	ASSERT(quadlet == IEEE1394_BIB_QUAD_SZ || quadlet <
	    IEEE1394_CONFIG_ROM_QUAD_SZ);

	locked = freecmd = done = 1;
	cmdret = S1394_HCMD_NODE_EXPECT_MORE;

	update_devinfo = 0;

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		locked = 0;
		goto bail;
	}

	node = &hal->topology_tree[node_num];

	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {

		ASSERT(CFGROM_BIB_READ(node) == B_TRUE);

		node->cfgrom[quadlet] = data;

		if (s1394_calc_next_quad(hal, node, quadlet, &quadlet) != 0) {

			node->cfgrom_valid_size = quadlet + 1;
			if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
				SET_CFGROM_ALL_READ(node);
				update_devinfo++;
			} else {
				s1394_free_cfgrom(hal, node,
				    S1394_FREE_CFGROM_BOTH);
			}
		} else {
			CFGROM_GET_READ_DELAY(node, readdelay);
			SETUP_QUAD_READ(node, 1, quadlet, 1);
			s1394_unlock_tree(hal);
			CFGROM_READ_PAUSE(readdelay);
			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
				locked = 0;
			} else if (s1394_read_config_quadlet(hal, cmd,
			    &status) != DDI_SUCCESS) {

				if ((status & S1394_CMD_INFLIGHT) != 0) {
					freecmd = 0;
				}
				if ((status & S1394_LOCK_FAILED) != 0) {
					locked = 0;
				} else {
					node->cfgrom_valid_size = quadlet;
					if (s1394_valid_cfgrom(hal, node) ==
					    B_TRUE) {
						SET_CFGROM_ALL_READ(node);
						update_devinfo++;
					} else {
						s1394_free_cfgrom(hal, node,
						    S1394_FREE_CFGROM_BOTH);
					}
				}
			} else {
				/* successfully started next read */
				done = 0;
				freecmd = 0;
			}
		}
	} else {
		node->cfgrom_read_fails++;
		BUMP_CFGROM_READ_DELAY(node);

		/* retry if not too many failures */
		if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
			CFGROM_GET_READ_DELAY(node, readdelay);
			s1394_unlock_tree(hal);
			SETUP_QUAD_READ(node, 0, quadlet, 1);
			CFGROM_READ_PAUSE(readdelay);
			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
				locked = 0;
			} else if (s1394_read_config_quadlet(hal, cmd,
			    &status) != DDI_SUCCESS) {
				if ((status & S1394_CMD_INFLIGHT) != 0) {
					freecmd = 0;
				}
				if ((status & S1394_LOCK_FAILED) != 0) {
					locked = 0;
				} else {
					/* stop further reads */
					node->cfgrom_valid_size = quadlet + 1;
					if (s1394_valid_cfgrom(hal, node) ==
					    B_TRUE) {
						SET_CFGROM_ALL_READ(node);
						update_devinfo++;
					} else {
						s1394_free_cfgrom(hal, node,
						    S1394_FREE_CFGROM_BOTH);
					}
				}
			} else {
				/* successfully started next read */
				done = 0;
				freecmd = 0;
			}
		} else {

			node->cfgrom_valid_size = quadlet + 1;
			if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
				SET_CFGROM_ALL_READ(node);
				update_devinfo++;
			} else {
				s1394_free_cfgrom(hal, node,
				    S1394_FREE_CFGROM_BOTH);
			}
		}
	}
bail:
	if (freecmd) {
		(void) s1394_free_cmd(hal, &cmd);
	}

	if (done) {
		cmdret = S1394_HCMD_NODE_DONE;
	}

	if (update_devinfo) {
		ASSERT(locked);
		/*
		 * s1394_update_devinfo_tree() drops and reacquires the
		 * topology_tree_mutex. If the tree lock fails, it returns
		 * DDI_FAILURE. Set locked to 0 in this case so that
		 * we will return S1394_HCMD_LOCK_FAILED below.
		 */
		if (s1394_update_devinfo_tree(hal, node) != DDI_SUCCESS) {
			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
			locked = 0;
		}
	}

	/* if we are bailing out because locking failed, locked == 0 */
	if (locked == 0)
		cmdret = S1394_HCMD_LOCK_FAILED;
	else
		s1394_unlock_tree(hal);

	return (cmdret);
}
/*
 * s1394_read_config_quadlet()
 *    Starts the read of a config quadlet (at the deduced cmd_addr). Returns
 *    DDI_SUCCESS if the read was started with no errors, else DDI_FAILURE
 *    is returned, with status indicating the reason for the failure(s).
 */
static int
s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
    s1394_status_t *status)
{
	s1394_node_t *node;
	int ret, err, node_num, quadlet;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	node_num = IEEE1394_ADDR_PHY_ID(cmd->cmd_addr);
	node = &hal->topology_tree[node_num];
	quadlet = node->cfgrom_quad_to_read;

	/* Calculate the 64-bit address */
	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num, quadlet,
	    cmd->cmd_addr);

	*status = S1394_NOSTATUS;

	ret = s1394_setup_asynch_command(hal, NULL, cmd, S1394_CMD_READ, &err);

	if (ret != DDI_SUCCESS) {
		*status |= S1394_UNKNOWN;
		ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
		return (DDI_FAILURE);
	}

	s1394_unlock_tree(hal);
	ret = DDI_SUCCESS;
	/* Send the command out */
	if (s1394_xfer_asynch_command(hal, cmd, &err) == DDI_SUCCESS) {
		/* Callers can expect a callback now */
		*status |= S1394_CMD_INFLIGHT;
	} else {

		s1394_cmd_priv_t *s_priv;

		/* Remove from queue */
		s1394_remove_q_asynch_cmd(hal, cmd);
		s_priv = S1394_GET_CMD_PRIV(cmd);

		s_priv->cmd_in_use = B_FALSE;

		*status |= S1394_XFER_FAILED;
		ret = DDI_FAILURE;
	}

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		*status |= S1394_LOCK_FAILED;
		ret = DDI_FAILURE;
	}

	return (ret);
}
/*
 * s1394_cfgrom_read_callback()
 *    callback routine for config rom reads. Frees the command if it failed
 *    due to bus reset, else appends the command to the completion queue
 *    and signals the completion queue cv.
 */
static void
s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd)
{
	cmd1394_cmd_t *tcmd;
	s1394_cmd_priv_t *s_priv;
	s1394_hal_t *hal;

#if defined(DEBUG)
	uint32_t node_num, quadlet, data;
#endif

	/* Get the Services Layer private area */
	s_priv = S1394_GET_CMD_PRIV(cmd);

	hal = (s1394_hal_t *)s_priv->sent_on_hal;

#if defined(DEBUG)

	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);

#endif

	if (cmd->cmd_result == CMD1394_EBUSRESET) {
		(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
	} else {
		mutex_enter(&hal->br_cmplq_mutex);

		/* Put the command on completion queue */
		s_priv->cmd_priv_next = NULL;
		if ((tcmd = hal->br_cmplq_tail) != NULL) {
			s_priv = S1394_GET_CMD_PRIV(tcmd);

			s_priv->cmd_priv_next = cmd;
		}

		hal->br_cmplq_tail = cmd;

		if (hal->br_cmplq_head == NULL)
			hal->br_cmplq_head = cmd;

		cv_signal(&hal->br_cmplq_cv);
		mutex_exit(&hal->br_cmplq_mutex);
	}
}
/*
 * s1394_cfgrom_parse_unit_dir()
 *    Parses the unit directory passed in and returns reg[2...5] of the reg
 *    property (see the 1275 binding for the reg property definition).
 *    Currently, returns 0 for all the values since none of the existing
 *    devices implement this and future devices, per P1212r, need a binding
 *    change.
 */
/* ARGSUSED */
void
s1394_cfgrom_parse_unit_dir(uint32_t *unit_dir, uint32_t *addr_hi,
    uint32_t *addr_lo, uint32_t *size_hi, uint32_t *size_lo)
{
	*addr_hi = *addr_lo = *size_hi = *size_lo = 0;
}
/*
 * s1394_get_quad_info()
 *    Helper routine that picks apart the various fields of a 1394 address
 */
static void
s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num, uint32_t *quadlet,
    uint32_t *data)
{
	uint64_t addr;

	addr = cmd->cmd_addr;
	*node_num = IEEE1394_ADDR_PHY_ID(addr);
	*quadlet = ((addr & IEEE1394_ADDR_OFFSET_MASK) -
	    IEEE1394_CONFIG_ROM_ADDR);
	*quadlet = (*quadlet >> 2);
	*data = T1394_DATA32(cmd->cmd_u.q.quadlet_data);
}
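
/*
 * Illustrative example (assuming the config ROM base offset is
 * 0xFFFFF0000400): for cmd_addr 0xFFC2FFFFF000040C, the phy ID field gives
 * node_num = 2, the 48-bit offset is 0xFFFFF000040C, subtracting the config
 * ROM base leaves 0xC, and 0xC >> 2 yields quadlet = 3.
 */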
/*
 * s1394_match_GUID()
 *    attempts to match nnode (which is in the current topology tree) with
 *    a node in the old topology tree by comparing GUIDs. If a match is found,
 *    the old_node field of the current node and the cur_node field of the old
 *    node are set to point to each other. Also, this routine makes both nodes
 *    point at the same config rom. If unable to relock the tree, returns
 *    DDI_FAILURE, else returns DDI_SUCCESS.
 */
static int
s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode)
{
	int old_node;
	int gen_changed;
	uint32_t old_a, old_b;
	uint32_t new_a, new_b;
	s1394_node_t *onode;
	s1394_target_t *t;
	int ret = DDI_SUCCESS;

	ASSERT(nnode->cfgrom != NULL);
	ASSERT(CFGROM_BIB_READ(nnode));

	new_a = nnode->node_guid_hi;
	new_b = nnode->node_guid_lo;

	for (old_node = 0; old_node < hal->old_number_of_nodes; old_node++) {

		onode = &hal->old_tree[old_node];
		if (onode->cfgrom == NULL || CFGROM_BIB_READ(onode) == B_FALSE)
			continue;

		old_a = onode->node_guid_hi;
		old_b = onode->node_guid_lo;

		if ((old_a == new_a) && (old_b == new_b)) {

			if (NODE_MATCHED(onode) == B_TRUE) {
				cmn_err(CE_NOTE, "!Duplicate GUIDs: %08x%08x",
				    old_a, old_b);
				/* offline the new node that last matched */
				ret = s1394_offline_node(hal, onode->cur_node);
				/* and make the current new node invalid */
				ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
				s1394_free_cfgrom(hal, nnode,
				    S1394_FREE_CFGROM_NEW);
				break;
			}

			/*
			 * If there is indeed a cfgrom gen change,
			 * CFGROM_GEN_CHANGED() will be set iff we are matching
			 * tree nodes. Otherwise, CONFIG_ROM_GEN(old) !=
			 * CONFIG_ROM_GEN(new).
			 */
			if (CFGROM_GEN_CHANGED(nnode) == B_TRUE ||
			    (CONFIG_ROM_GEN(onode->cfgrom) !=
			    CONFIG_ROM_GEN(nnode->cfgrom))) {
				gen_changed = 1;
			} else {
				gen_changed = 0;
			}

			onode->cur_node = nnode;
			nnode->old_node = onode;
			nnode->node_state = onode->node_state;
			SET_NODE_VISITED(onode);
			SET_NODE_MATCHED(onode);
			SET_NODE_MATCHED(nnode);
			/*
			 * If generations changed, need to offline any targets
			 * hanging off the old node, prior to freeing up old
			 * cfgrom. If the generations didn't change, we can
			 * free up the new config rom and copy all info from
			 * the old node (this helps in picking up further
			 * reads from where the last generation left off).
			 */
			if (gen_changed == 1) {
				if (s1394_offline_node(hal, onode)) {
					ret = DDI_FAILURE;
					break;
				}
				s1394_free_cfgrom(hal, onode,
				    S1394_FREE_CFGROM_OLD);
				CLEAR_CFGROM_PARSED(nnode);
				CLEAR_CFGROM_NEW_ALLOC(nnode);
				CLEAR_CFGROM_NEW_ALLOC(onode);
				onode->cfgrom = nnode->cfgrom;
				/* done */
				break;
			}

			/*
			 * Free up cfgrom memory in the new node and
			 * point it at the same config rom as the old one.
			 */
			if (onode->cfgrom != nnode->cfgrom) {

				ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
				s1394_free_cfgrom(hal, nnode,
				    S1394_FREE_CFGROM_NEW);
			}
			nnode->cfgrom = onode->cfgrom;
			nnode->cfgrom_state = onode->cfgrom_state;
			nnode->cfgrom_valid_size = onode->cfgrom_valid_size;
			nnode->cfgrom_size = onode->cfgrom_size;
			nnode->cfgrom_quad_to_read = onode->cfgrom_quad_to_read;
			bcopy(onode->dir_stack, nnode->dir_stack,
			    offsetof(s1394_node_t, cfgrom_quad_to_read) -
			    offsetof(s1394_node_t, dir_stack));
			CLEAR_CFGROM_NEW_ALLOC(nnode);
			CLEAR_CFGROM_NEW_ALLOC(onode);

			if (CFGROM_PARSED(nnode) == B_TRUE) {
				rw_enter(&hal->target_list_rwlock, RW_WRITER);
				/* Update the target list */
				if ((t = onode->target_list) != NULL) {
					nnode->target_list = t;
					while (t != NULL) {
						t->on_node = nnode;
						t = t->target_sibling;
					}
				}
				rw_exit(&hal->target_list_rwlock);
			}
			break;
		}
	}

	return (ret);
}
/*
 * s1394_match_all_GUIDs()
 *    attempts to match each node in the current topology tree with a
 *    node in the old topology tree. If unable to relock the tree, returns
 *    DDI_FAILURE, else returns DDI_SUCCESS.
 */
static int
s1394_match_all_GUIDs(s1394_hal_t *hal)
{
	int node;
	int ret = DDI_SUCCESS;
	s1394_node_t *nnode;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	for (node = 0; node < hal->number_of_nodes; node++) {
		nnode = &hal->topology_tree[node];
		if (LINK_ACTIVE(nnode) == B_FALSE || CFGROM_BIB_READ(nnode) ==
		    B_FALSE)
			continue;
		if (NODE_MATCHED(nnode)) {
			/*
			 * Skip if node matched. If config rom generations
			 * changed, we want to call s1394_match_GUID() even
			 * if the nodes matched.
			 */
			int gen_changed;
			s1394_node_t *onode = nnode->old_node;

			gen_changed = (onode && onode->cfgrom &&
			    CONFIG_ROM_GEN(onode->cfgrom) != CONFIG_ROM_GEN(
			    nnode->cfgrom)) ? 1 : 0;

			if (CFGROM_GEN_CHANGED(nnode) == 0 && gen_changed == 0)
				continue;
		}

		if (s1394_match_GUID(hal, nnode) == DDI_FAILURE) {
			ret = DDI_FAILURE;
		}
	}

	return (ret);
}
/*
 * s1394_valid_cfgrom()
 *    Performs a crc check on the config rom. Returns B_TRUE if the config rom
 *    has a good CRC, else returns B_FALSE.
 */
/* ARGSUSED */
boolean_t
s1394_valid_cfgrom(s1394_hal_t *hal, s1394_node_t *node)
{
	uint32_t crc_len, crc_value, CRC, CRC_old, quad0;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	ASSERT(node->cfgrom);

	if (s1394_enable_crc_validation == 0) {
		return (B_TRUE);
	}

	quad0 = node->cfgrom[0];
	crc_len = (quad0 >> IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
	    IEEE1394_CFG_ROM_CRC_LEN_MASK;
	crc_value = quad0 & IEEE1394_CFG_ROM_CRC_VALUE_MASK;

	if (node->cfgrom_valid_size < crc_len + 1) {
		return (B_FALSE);
	}

	CRC = s1394_CRC16(&node->cfgrom[1], crc_len);

	if (CRC != crc_value) {
		CRC_old = s1394_CRC16_old(&node->cfgrom[1], crc_len);
		if (CRC_old == crc_value) {
			return (B_TRUE);
		}

		cmn_err(CE_NOTE,
		    "!Bad CRC in config rom (node's GUID %08x%08x)",
		    node->node_guid_hi, node->node_guid_lo);

		return (B_FALSE);
	}

	return (B_TRUE);
}
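
/*
 * For reference (per IEEE 1212; the numbers are illustrative only): quadlet 0
 * of the config ROM carries info_length in bits 31..24, crc_length in bits
 * 23..16 and rom_crc_value in bits 15..0. So for quad0 == 0x0404abcd the
 * code above extracts crc_len = 4 and crc_value = 0xabcd, and the CRC-16 is
 * computed over cfgrom[1] .. cfgrom[4].
 */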
/*
 * s1394_valid_dir()
 *    Performs a crc check on a directory. Returns B_TRUE if the dir has a
 *    good CRC, else returns B_FALSE.
 */
/*ARGSUSED*/
boolean_t
s1394_valid_dir(s1394_hal_t *hal, s1394_node_t *node,
    uint32_t key, uint32_t *dir)
{
	uint32_t dir_len, crc_value, CRC, CRC_old, quad0;

	/*
	 * Ideally, we would like to do crc validations for the entire cfgrom
	 * as well as the individual directories. However, we have seen devices
	 * that have valid directories but busted cfgrom crc and devices that
	 * have bad crcs in directories as well as for the entire cfgrom. This
	 * is sad, but unfortunately, real world!
	 */
	if (s1394_enable_crc_validation == 0) {
		return (B_TRUE);
	}

	quad0 = dir[0];

	dir_len = IEEE1212_DIR_LEN(quad0);
	crc_value = IEEE1212_DIR_CRC(quad0);

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	CRC = s1394_CRC16(&dir[1], dir_len);

	if (CRC != crc_value) {
		CRC_old = s1394_CRC16_old(&dir[1], dir_len);
		if (CRC_old == crc_value) {
			return (B_TRUE);
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * s1394_become_bus_mgr()
 *    is a callback from a timeout() setup by the main br_thread.  After
 *    a bus reset, depending on the Bus Manager's incumbency and the state
 *    of its abdicate bit, a timer of a certain length is set.  After this
 *    time expires, the local host may attempt to become the Bus Manager.
 *    This is done by sending a request to the current IRM on the bus.  The
 *    IRM holds the BUS_MANAGER_ID register.  Depending on whether or not
 *    the local host is already the IRM, we will send a request onto the
 *    1394 bus or call into the HAL.
 */
static void
s1394_become_bus_mgr(void *arg)
{
	s1394_hal_t *hal;
	s1394_cmd_priv_t *s_priv;
	cmd1394_cmd_t *cmd;
	uint64_t Bus_Mgr_ID_addr;
	uint32_t hal_node_num;
	uint32_t old_value;
	uint32_t generation;
	uint_t curr_bus_mgr;
	uint_t bm_node;
	uint_t IRM_node;
	int err;
	int ret;

	hal = (s1394_hal_t *)arg;

	/* Lock the topology tree */
	mutex_enter(&hal->topology_tree_mutex);

	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
	generation = hal->generation_count;
	IRM_node = hal->IRM_node;

	mutex_enter(&hal->bus_mgr_node_mutex);
	bm_node = hal->bus_mgr_node;
	mutex_exit(&hal->bus_mgr_node_mutex);

	/* Unlock the topology tree */
	mutex_exit(&hal->topology_tree_mutex);

	/* Make sure we aren't already the Bus Manager */
	if (bm_node != -1) {
		return;
	}

	/* Send compare-swap to BUS_MANAGER_ID */
	/* register on the Isoch Rsrc Mgr */
	if (IRM_node == hal_node_num) {
		/* Local */
		ret = HAL_CALL(hal).csr_cswap32(hal->halinfo.hal_private,
		    generation, (IEEE1394_SCSR_BUSMGR_ID &
		    IEEE1394_CSR_OFFSET_MASK), S1394_INVALID_NODE_NUM,
		    hal_node_num, &old_value);
		if (ret != DDI_SUCCESS) {
			return;
		}
		curr_bus_mgr = IEEE1394_NODE_NUM(old_value);

		mutex_enter(&hal->bus_mgr_node_mutex);
		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
		    (curr_bus_mgr == hal_node_num)) {
			hal->bus_mgr_node = hal_node_num;
			hal->incumbent_bus_mgr = B_TRUE;
		} else {
			hal->bus_mgr_node = curr_bus_mgr;
			hal->incumbent_bus_mgr = B_FALSE;
		}
		cv_signal(&hal->bus_mgr_node_cv);
		mutex_exit(&hal->bus_mgr_node_mutex);

	} else {
		/* Remote */
		if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &cmd) !=
		    DDI_SUCCESS) {
			return;
		}

		cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
		    CMD1394_OVERRIDE_ADDR);
		cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
		cmd->completion_callback = s1394_become_bus_mgr_callback;
		Bus_Mgr_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
		    IEEE1394_SCSR_BUSMGR_ID) |
		    (((uint64_t)hal->IRM_node) << IEEE1394_ADDR_PHY_ID_SHIFT);
		cmd->cmd_addr = Bus_Mgr_ID_addr;
		cmd->bus_generation = generation;
		cmd->cmd_u.l32.arg_value = T1394_DATA32(
		    S1394_INVALID_NODE_NUM);
		cmd->cmd_u.l32.data_value = T1394_DATA32(hal_node_num);
		cmd->cmd_u.l32.num_retries = 0;
		cmd->cmd_u.l32.lock_type = CMD1394_LOCK_COMPARE_SWAP;

		/* Get the Services Layer private area */
		s_priv = S1394_GET_CMD_PRIV(cmd);

		/* Lock the topology tree */
		mutex_enter(&hal->topology_tree_mutex);

		ret = s1394_setup_asynch_command(hal, NULL, cmd,
		    S1394_CMD_LOCK, &err);

		/* Unlock the topology tree */
		mutex_exit(&hal->topology_tree_mutex);

		/* Command has now been put onto the queue! */
		if (ret != DDI_SUCCESS) {
			/* Need to free the command */
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
			return;
		}

		/* Send the command out */
		ret = s1394_xfer_asynch_command(hal, cmd, &err);

		if (ret != DDI_SUCCESS) {
			/* Remove cmd from the outstanding request Q */
			s1394_remove_q_asynch_cmd(hal, cmd);

			s_priv->cmd_in_use = B_FALSE;

			mutex_enter(&hal->bus_mgr_node_mutex);

			/* Don't know who the bus_mgr is */
			hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
			hal->incumbent_bus_mgr = B_FALSE;

			cv_signal(&hal->bus_mgr_node_cv);
			mutex_exit(&hal->bus_mgr_node_mutex);

			/* Need to free the command */
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
		}
	}
}
1941 * s1394_become_bus_mgr_callback()
1942 * is the callback used by s1394_become_bus_mgr() when it is necessary
1943 * to send the Bus Manager request to a remote IRM. After the completion
1944 * of the compare-swap request, this routine looks at the "old_value"
1945 * in the request to determine whether or not it has become the Bus
1946 * Manager for the current generation. It sets the bus_mgr_node and
1947 * incumbent_bus_mgr fields to their appropriate values.
1949 static void
1950 s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd)
1952 s1394_cmd_priv_t *s_priv;
1953 s1394_hal_t *hal;
1954 uint32_t hal_node_num;
1955 uint32_t temp;
1956 uint_t curr_bus_mgr;
1958 /* Get the Services Layer private area */
1959 s_priv = S1394_GET_CMD_PRIV(cmd);
1961 hal = (s1394_hal_t *)s_priv->sent_on_hal;
1963 /* Lock the topology tree */
1964 mutex_enter(&hal->topology_tree_mutex);
1966 hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1968 /* Was the command successful? */
1969 if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
1970 temp = T1394_DATA32(cmd->cmd_u.l32.old_value);
1971 curr_bus_mgr = IEEE1394_NODE_NUM(temp);
1972 mutex_enter(&hal->bus_mgr_node_mutex);
1973 if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
1974 (curr_bus_mgr == hal_node_num)) {
1976 hal->bus_mgr_node = hal_node_num;
1977 hal->incumbent_bus_mgr = B_TRUE;
1979 } else {
1980 hal->bus_mgr_node = curr_bus_mgr;
1981 hal->incumbent_bus_mgr = B_FALSE;
1983 cv_signal(&hal->bus_mgr_node_cv);
1984 mutex_exit(&hal->bus_mgr_node_mutex);
1986 } else {
1987 mutex_enter(&hal->bus_mgr_node_mutex);
1989 /* Don't know who the bus_mgr is */
1990 hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
1991 hal->incumbent_bus_mgr = B_FALSE;
1993 cv_signal(&hal->bus_mgr_node_cv);
1994 mutex_exit(&hal->bus_mgr_node_mutex);
1995 }
1997 /* Need to free the command */
1998 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2000 /* Unlock the topology tree */
2001 mutex_exit(&hal->topology_tree_mutex);
2002 }
2004 /*
2005 * s1394_bus_mgr_processing()
2006 * is called following "phase1" completion of reading Bus_Info_Blocks.
2007 * Its purpose is to determine whether the local node is capable of
2008 * becoming the Bus Manager (has the IRMC bit set) and if so to call
2009 * the s1394_do_bus_mgr_processing() routine.
2010 * NOTE: we overload DDI_FAILURE return value to mean jump back to
2011 * the start of bus reset processing.
2012 */
2013 static int
2014 s1394_bus_mgr_processing(s1394_hal_t *hal)
2015 {
2016 int ret;
2017 int IRM_node_num;
2019 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2021 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2022 return (DDI_FAILURE);
2023 }
2024 IRM_node_num = hal->IRM_node;
2025 s1394_unlock_tree(hal);
2027 ret = DDI_SUCCESS;
2029 /* If we are IRM capable, then do bus_mgr stuff... */
2030 if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
2031 /* If there is an IRM, then do bus_mgr stuff */
2032 if (IRM_node_num != -1) {
2033 if (s1394_do_bus_mgr_processing(hal))
2034 ret = DDI_FAILURE;
2035 }
2036 }
2038 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2040 return (ret);
2041 }
2043 /*
2044 * s1394_do_bus_mgr_processing()
2045 * is used to perform those operations expected of the Bus Manager.
2046 * After being called, s1394_do_bus_mgr_processing() looks at the value
2047 * in bus_mgr_node and waits if it is -1 (Bus Manager has not been
2048 * chosen yet). Then, if there is more than one node on the 1394 bus,
2049 * and we are either the Bus Manager or (if there is no Bus Manager)
2050 * the IRM, it optimizes the gap_count and/or sets the cycle master's
2051 * root holdoff bit (to ensure that the cycle master is/stays root).
2053 * NOTE: we overload DDI_FAILURE return value to mean jump back to
2054 * the start of bus reset processing.
2055 */
2056 static int
2057 s1394_do_bus_mgr_processing(s1394_hal_t *hal)
2058 {
2059 int ret;
2060 int IRM_flags, hal_bus_mgr_node;
2061 int IRM_node_num;
2062 uint_t hal_node_num, number_of_nodes;
2063 int new_root, new_gap_cnt;
2065 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2067 /* Wait for Bus Manager to be determined */
2068 /* or a Bus Reset to happen */
2069 mutex_enter(&hal->bus_mgr_node_mutex);
2070 if (hal->bus_mgr_node == -1)
2071 cv_wait(&hal->bus_mgr_node_cv, &hal->bus_mgr_node_mutex);
2073 /* Check if a BUS RESET has come while we've been waiting */
2074 mutex_enter(&hal->br_thread_mutex);
2075 if (hal->br_thread_ev_type & (BR_THR_CFGROM_SCAN | BR_THR_GO_AWAY)) {
2077 mutex_exit(&hal->br_thread_mutex);
2078 mutex_exit(&hal->bus_mgr_node_mutex);
2080 return (1);
2081 }
2082 mutex_exit(&hal->br_thread_mutex);
2084 hal_bus_mgr_node = hal->bus_mgr_node;
2085 mutex_exit(&hal->bus_mgr_node_mutex);
2087 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2088 return (1);
2089 }
2090 hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
2091 IRM_node_num = hal->IRM_node;
2092 number_of_nodes = hal->number_of_nodes;
2094 ret = 0;
2096 /* If we are the bus_mgr, or if there is no bus_mgr and we are */
2097 /* the IRM, and there is more than one node on the bus */
2098 if ((number_of_nodes > 1) &&
2099 ((hal_bus_mgr_node == (int)hal_node_num) ||
2100 ((hal_bus_mgr_node == S1394_INVALID_NODE_NUM) &&
2101 (IRM_node_num == (int)hal_node_num)))) {
2103 IRM_flags = 0;
2105 /* Make sure the root node is cycle master capable */
2106 if (!s1394_cycle_master_capable(hal)) {
2107 /* Make the local node root */
2108 new_root = hal_node_num;
2109 IRM_flags = IRM_flags | ROOT_HOLDOFF;
2111 /* If setting root, then optimize gap_count */
2112 new_gap_cnt = hal->optimum_gap_count;
2113 IRM_flags = IRM_flags | GAP_COUNT;
2115 } else {
2116 /* Make sure root's ROOT_HOLDOFF bit is set */
2117 new_root = (number_of_nodes - 1);
2118 IRM_flags = IRM_flags | ROOT_HOLDOFF;
2120 if (hal->gap_count > hal->optimum_gap_count) {
2121 /* Set the gap_count to optimum */
2122 new_gap_cnt = hal->optimum_gap_count;
2123 IRM_flags = IRM_flags | GAP_COUNT;
2125 }
2126 }
2127 s1394_unlock_tree(hal);
2129 if (IRM_flags) {
2130 ret = s1394_do_phy_config_pkt(hal, new_root,
2131 new_gap_cnt, IRM_flags);
2132 }
2133 return (ret);
2134 }
2136 s1394_unlock_tree(hal);
2138 return (ret);
2139 }
2141 /*
2142 * s1394_bus_mgr_timers_stop()
2143 * Cancels bus manager timeouts
2144 */
2145 /*ARGSUSED*/
2146 static void
2147 s1394_bus_mgr_timers_stop(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
2148 timeout_id_t *bus_mgr_tid)
2149 {
2150 /* Cancel the Bus Mgr timeouts (if necessary) */
2151 if (*bus_mgr_tid != 0) {
2152 (void) untimeout(*bus_mgr_tid);
2153 *bus_mgr_tid = 0;
2154 }
2155 if (*bus_mgr_query_tid != 0) {
2156 (void) untimeout(*bus_mgr_query_tid);
2157 *bus_mgr_query_tid = 0;
2158 }
2159 }
2161 /*
2162 * s1394_bus_mgr_timers_start()
2163 * Starts bus manager timeouts if the hal is IRM capable.
2164 */
2165 static void
2166 s1394_bus_mgr_timers_start(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
2167 timeout_id_t *bus_mgr_tid)
2168 {
2169 boolean_t incumbent;
2170 uint_t hal_node_num;
2171 int IRM_node_num;
2173 mutex_enter(&hal->topology_tree_mutex);
2175 IRM_node_num = hal->IRM_node;
2176 hal_node_num = hal->node_id;
2178 mutex_enter(&hal->bus_mgr_node_mutex);
2179 incumbent = hal->incumbent_bus_mgr;
2180 mutex_exit(&hal->bus_mgr_node_mutex);
2182 /* If we are IRM capable, then do bus_mgr stuff... */
2183 if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
2184 /*
2185 * If we are the IRM, then wait 625ms
2186 * before checking BUS_MANAGER_ID register
2187 */
2188 if (IRM_node_num == IEEE1394_NODE_NUM(hal_node_num)) {
2190 mutex_exit(&hal->topology_tree_mutex);
2192 /* Wait 625ms, then check bus manager */
2193 *bus_mgr_query_tid = timeout(s1394_become_bus_mgr,
2194 hal, drv_usectohz(IEEE1394_BM_IRM_TIMEOUT));
2196 mutex_enter(&hal->topology_tree_mutex);
2197 }
2199 /* If there is an IRM on the bus */
2200 if (IRM_node_num != -1) {
2201 if ((incumbent == B_TRUE) &&
2202 (hal->abdicate_bus_mgr_bit == 0)) {
2203 mutex_exit(&hal->topology_tree_mutex);
2205 /* Try to become bus manager */
2206 s1394_become_bus_mgr(hal);
2208 mutex_enter(&hal->topology_tree_mutex);
2209 } else {
2210 hal->abdicate_bus_mgr_bit = 0;
2212 mutex_exit(&hal->topology_tree_mutex);
2214 /* Wait 125ms, then try to become bus manager */
2215 *bus_mgr_tid = timeout(s1394_become_bus_mgr,
2216 hal, drv_usectohz(
2217 IEEE1394_BM_INCUMBENT_TIMEOUT));
2219 mutex_enter(&hal->topology_tree_mutex);
2220 }
2221 } else {
2222 mutex_enter(&hal->bus_mgr_node_mutex);
2223 hal->incumbent_bus_mgr = B_FALSE;
2224 mutex_exit(&hal->bus_mgr_node_mutex);
2225 }
2226 }
2228 mutex_exit(&hal->topology_tree_mutex);
2229 }
2231 /*
2232 * s1394_get_maxpayload()
2233 * is used to determine a device's maximum payload size. That is to
2234 * say, the largest packet that can be transmitted or received by the
2235 * target device given the current topological (speed) constraints
2236 * and the constraints specified in the local host's and remote device's
2237 * Config ROM (max_rec). Caller must hold the topology_tree_mutex and
2238 * the target_list_rwlock as an RW_READER (at least).
2239 */
2240 /*ARGSUSED*/
2241 void
2242 s1394_get_maxpayload(s1394_target_t *target, uint_t *dev_max_payload,
2243 uint_t *current_max_payload)
2244 {
2245 s1394_hal_t *hal;
2246 uint32_t bus_capabilities;
2247 uint32_t from_node;
2248 uint32_t to_node;
2249 uint_t local_max_rec;
2250 uint_t local_max_blk;
2251 uint_t max_rec;
2252 uint_t max_blk;
2253 uint_t curr_speed;
2254 uint_t speed_max_blk;
2255 uint_t temp;
2257 /* Find the HAL this target resides on */
2258 hal = target->on_hal;
2260 /* Make sure we're holding the topology_tree_mutex */
2261 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2263 /* Set dev_max_payload to local (HAL's) size */
2264 bus_capabilities = target->on_hal->halinfo.bus_capabilities;
2265 local_max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
2266 IEEE1394_BIB_MAXREC_SHIFT;
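/*
 * max_rec encodes the maximum asynchronous payload as a power of two:
 * a device reporting max_rec N can accept blocks of 2^(N+1) bytes (for
 * example, max_rec 8 => 512 bytes, max_rec 10 => 2048 bytes).  A value
 * of 0 or of 14 and above is unspecified or reserved, so it falls back
 * to a single quadlet (4 bytes) below.
 */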
2267 if ((local_max_rec > 0) && (local_max_rec < 14)) {
2268 local_max_blk = 1 << (local_max_rec + 1);
2269 } else {
2270 /* These are either unspecified or reserved */
2271 local_max_blk = 4;
2272 }
2274 /* Is this target on a node? */
2275 if ((target->target_state & S1394_TARG_GONE) == 0 &&
2276 (target->on_node != NULL)) {
2277 ASSERT(target->on_node->cfgrom != NULL);
2279 bus_capabilities =
2280 target->on_node->cfgrom[IEEE1212_NODE_CAP_QUAD];
2281 max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
2282 IEEE1394_BIB_MAXREC_SHIFT;
2284 if ((max_rec > 0) && (max_rec < 14)) {
2285 max_blk = 1 << (max_rec + 1);
2286 } else {
2287 /* These are either unspecified or reserved */
2288 max_blk = 4;
2289 }
2290 (*dev_max_payload) = max_blk;
2292 from_node = IEEE1394_NODE_NUM(target->on_hal->node_id);
2293 to_node = (target->on_node->node_num);
2295 /* Speed is to be filled in from speed map */
2296 curr_speed = (uint_t)s1394_speed_map_get(target->on_hal,
2297 from_node, to_node);
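/*
 * The S100 asynchronous payload limit is 512 bytes and doubles with each
 * speed step (S200 => 1024, S400 => 2048), which is what (512 << curr_speed)
 * computes below.  The usable payload is then the smallest of the local
 * max_rec size, the remote max_rec size and this speed-derived limit.
 */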
2298 speed_max_blk = 512 << curr_speed;
2299 temp = (local_max_blk < max_blk) ? local_max_blk : max_blk;
2300 (*current_max_payload) = (temp < speed_max_blk) ? temp :
2301 speed_max_blk;
2302 } else {
2303 /* Set dev_max_payload to local (HAL's) size */
2304 (*dev_max_payload) = local_max_blk;
2305 (*current_max_payload) = local_max_blk;
2306 }
2307 }
2309 /*
2310 * s1394_cycle_master_capable()
2311 * is used to determine whether or not the current root node on the
2312 * 1394 bus has its CMC-bit set in its Config ROM. If not, then it
2313 * is not capable of being cycle master and a new root node must be
2314 * selected.
2315 */
2316 static int
2317 s1394_cycle_master_capable(s1394_hal_t *hal)
2318 {
2319 s1394_node_t *root;
2320 int cycle_master_capable;
2321 uint_t hal_node_num;
2323 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2325 hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
2327 /* Get a pointer to the root node */
2328 root = s1394_topology_tree_get_root_node(hal);
2330 /* Ignore, if we are already root */
2331 if (root == &hal->topology_tree[hal_node_num]) {
2332 return (1);
2333 }
2335 /*
2336 * We want to pick a new root if link is off or we don't have
2337 * valid config rom
2338 */
2339 if (LINK_ACTIVE(root) == B_FALSE || root->cfgrom == NULL ||
2340 CFGROM_BIB_READ(root) == 0) {
2342 return (0);
2343 }
2345 /* Check the Cycle Master bit in the Bus Info Block */
2346 cycle_master_capable = root->cfgrom[IEEE1212_NODE_CAP_QUAD] &
2347 IEEE1394_BIB_CMC_MASK;
2349 if (cycle_master_capable) {
2350 return (1);
2351 } else {
2352 return (0);
2353 }
2354 }
2356 /*
2357 * s1394_do_phy_config_pkt()
2358 * is called by s1394_do_bus_mgr_processing() to setup and send out
2359 * a PHY configuration packet onto the 1394 bus. Depending on the
2360 * values in IRM_flags, the gap_count and root_holdoff bits on the
2361 * bus will be affected by this packet.
2363 * NOTE: we overload DDI_FAILURE return value to mean jump back to
2364 * the start of bus reset processing.
2365 */
2366 static int
2367 s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root, int new_gap_cnt,
2368 uint32_t IRM_flags)
2369 {
2370 cmd1394_cmd_t *cmd;
2371 s1394_cmd_priv_t *s_priv;
2372 h1394_cmd_priv_t *h_priv;
2373 uint32_t pkt_data = 0;
2374 uint32_t gap_cnt = 0;
2375 uint32_t root = 0;
2376 int ret, result;
2377 uint_t flags = 0;
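/*
 * The PHY configuration packet payload is assembled below: the T bit plus
 * the new gap count are set when the gap count is being optimized, and the
 * R (force-root) bit plus the phy_ID of the intended root are set when the
 * root holdoff bit is being changed; the masks and shifts place each field
 * in the quadlet.  The set_gap_count()/set_root_holdoff_bit() HAL calls
 * apply the same settings to the local PHY.
 */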
2379 /* Gap count needs to be optimized */
2380 if (IRM_flags & GAP_COUNT) {
2382 pkt_data = pkt_data | IEEE1394_PHY_CONFIG_T_BIT_MASK;
2383 gap_cnt = ((uint32_t)new_gap_cnt) <<
2384 IEEE1394_PHY_CONFIG_GAP_CNT_SHIFT;
2385 gap_cnt = gap_cnt & IEEE1394_PHY_CONFIG_GAP_CNT_MASK;
2386 pkt_data = pkt_data | gap_cnt;
2388 (void) HAL_CALL(hal).set_gap_count(hal->halinfo.hal_private,
2389 (uint_t)new_gap_cnt);
2390 }
2392 /* Root node needs to be changed */
2393 if (IRM_flags & ROOT_HOLDOFF) {
2395 pkt_data = pkt_data | IEEE1394_PHY_CONFIG_R_BIT_MASK;
2396 root = ((uint32_t)new_root) <<
2397 IEEE1394_PHY_CONFIG_ROOT_HOLD_SHIFT;
2398 root = root & IEEE1394_PHY_CONFIG_ROOT_HOLD_MASK;
2399 pkt_data = pkt_data | root;
2401 (void) HAL_CALL(hal).set_root_holdoff_bit(
2402 hal->halinfo.hal_private);
2403 }
2406 if (IRM_flags) {
2407 if (s1394_alloc_cmd(hal, flags, &cmd) != DDI_SUCCESS) {
2408 return (0);
2409 }
2411 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2412 /* lock tree failure indicates a bus gen change */
2413 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2414 return (1);
2415 }
2417 /* Setup the callback routine */
2418 cmd->completion_callback = s1394_phy_config_callback;
2419 cmd->cmd_callback_arg = (void *)(uintptr_t)IRM_flags;
2420 cmd->bus_generation = hal->generation_count;
2421 cmd->cmd_options = CMD1394_OVERRIDE_ADDR;
2422 cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
2423 cmd->cmd_u.q.quadlet_data = pkt_data;
2425 /* Get the Services Layer private area */
2426 s_priv = S1394_GET_CMD_PRIV(cmd);
2428 /* Get a pointer to the HAL private struct */
2429 h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
2431 s_priv->sent_by_target = (s1394_target_t *)NULL;
2432 s_priv->sent_on_hal = (s1394_hal_t *)hal;
2434 h_priv->bus_generation = cmd->bus_generation;
2436 /* Speed must be IEEE1394_S100 on PHY config packets */
2437 s_priv->hal_cmd_private.speed = IEEE1394_S100;
2439 /* Mark command as being used */
2440 s_priv->cmd_in_use = B_TRUE;
2442 s1394_unlock_tree(hal);
2444 /* Put command on the HAL's outstanding request Q */
2445 s1394_insert_q_asynch_cmd(hal, cmd);
2447 ret = HAL_CALL(hal).send_phy_configuration_packet(
2448 hal->halinfo.hal_private, (cmd1394_cmd_t *)cmd,
2449 (h1394_cmd_priv_t *)&s_priv->hal_cmd_private, &result);
2451 if (ret != DDI_SUCCESS) {
2452 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2454 return (0);
2456 } else {
2457 /*
2458 * There will be a bus reset only if GAP_COUNT changed
2459 */
2460 if (IRM_flags & GAP_COUNT) {
2461 return (1);
2462 }
2463 }
2464 }
2466 return (0);
2467 }
2469 /*
2470 * s1394_phy_config_callback()
2471 * is the callback called after the PHY configuration packet has been
2472 * sent out onto the 1394 bus. Depending on the values in IRM_flags,
2473 * (specifically if the gap_count has been changed) this routine may
2474 * initiate a bus reset.
2475 */
2476 static void
2477 s1394_phy_config_callback(cmd1394_cmd_t *cmd)
2478 {
2479 s1394_cmd_priv_t *s_priv;
2480 s1394_hal_t *hal;
2481 uint32_t IRM_flags;
2483 /* Get the Services Layer private area */
2484 s_priv = S1394_GET_CMD_PRIV(cmd);
2486 hal = (s1394_hal_t *)s_priv->sent_on_hal;
2488 IRM_flags = (uint32_t)(uintptr_t)cmd->cmd_callback_arg;
2490 if (cmd->cmd_result != CMD1394_CMDSUCCESS) {
2491 (void) s1394_free_cmd(hal, &cmd);
2492 } else {
2493 (void) s1394_free_cmd(hal, &cmd);
2495 /* Only need a bus reset if we changed GAP_COUNT */
2496 if (IRM_flags & GAP_COUNT) {
2497 s1394_initiate_hal_reset(hal, NON_CRITICAL);
2498 }
2499 }
2500 }
2502 /*
2503 * s1394_lock_tree()
2504 * Attempts to lock the topology tree. Returns DDI_FAILURE if generations
2505 * changed or if the services layer signals the bus reset thread to go
2506 * away. Otherwise, returns DDI_SUCCESS.
2507 */
2508 int
2509 s1394_lock_tree(s1394_hal_t *hal)
2510 {
2511 int circular;
2513 ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
2514 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2516 mutex_enter(&hal->br_thread_mutex);
2517 ndi_devi_enter(hal->halinfo.dip, &circular);
2518 mutex_enter(&hal->topology_tree_mutex);
2520 if ((hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
2521 mutex_exit(&hal->br_thread_mutex);
2522 mutex_exit(&hal->topology_tree_mutex);
2523 ndi_devi_exit(hal->halinfo.dip, circular);
2524 return (DDI_FAILURE);
2525 } else if (hal->br_cfgrom_read_gen != hal->generation_count) {
2526 mutex_exit(&hal->br_thread_mutex);
2527 mutex_exit(&hal->topology_tree_mutex);
2528 ndi_devi_exit(hal->halinfo.dip, circular);
2529 return (DDI_FAILURE);
2530 }
2532 mutex_exit(&hal->br_thread_mutex);
2534 return (DDI_SUCCESS);
2535 }
2537 /*
2538 * s1394_unlock_tree()
2539 * Unlocks the topology tree
2540 */
2541 void
2542 s1394_unlock_tree(s1394_hal_t *hal)
2543 {
2544 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2545 mutex_exit(&hal->topology_tree_mutex);
2546 ndi_devi_exit(hal->halinfo.dip, 0);
2547 }
2549 /*
2550 * s1394_calc_next_quad()
2551 * figures out the next quadlet to read. This maintains a stack of
2552 * directories in the node. When the first quad of a directory (the
2553 * first directory would be the root directory) is read, it is pushed on
2554 * this stack. When the directory is all read, it scans the directory
2555 * looking for indirect entries. If any indirect directory entry is found,
2556 * it is pushed on the stack and that directory is read. If we are done dealing
2557 * with all entries in the current dir, the directory is popped off the
2558 * stack. If the stack is empty, we are back at the root directory level
2559 * and have essentially read the entire directory hierarchy.
2560 * Returns 0 if there are more quads to read, else returns non-zero.
2561 */
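/*
 * For illustration (hypothetical layout): if the root directory header sits
 * at quad 5 with a length of 4 (entries in quads 6-9), and the entry at
 * quad 7 is a directory entry with an offset of 6 (pointing to quad 13),
 * then quads 5 through 9 are read first, the directory at quad 13 (and
 * anything it points to) is read next, and scanning then resumes with the
 * saved entry at quad 8.
 */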
2562 static int
2563 s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node, uint32_t quadlet,
2564 uint32_t *nextquadp)
2565 {
2566 uint32_t data, type, key, value, *ptr;
2568 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2570 if (((quadlet + 1) >= node->cfgrom_size) ||
2571 (CFGROM_SIZE_IS_CRCSIZE(node) == B_TRUE && (quadlet + 1) >=
2572 node->cfgrom_valid_size)) {
2573 return (1);
2574 }
2576 if (s1394_turn_off_dir_stack != 0 || CFGROM_DIR_STACK_OFF(node) ==
2577 B_TRUE) {
2578 quadlet++;
2579 *nextquadp = quadlet;
2580 return (0);
2581 }
2583 data = node->cfgrom[quadlet];
2585 if (quadlet == IEEE1212_ROOT_DIR_QUAD) {
2586 node->dir_stack_top = -1;
2587 node->expected_dir_quad = quadlet;
2588 node->expected_type = IEEE1212_IMMEDIATE_TYPE;
2589 }
2591 CFGROM_TYPE_KEY_VALUE(data, type, key, value);
2593 /*
2594 * check to make sure we are looking at a dir. If the config rom
2595 * is broken, then revert to normal scanning of the config rom
2596 */
2597 if (node->expected_dir_quad == quadlet) {
2598 if (type != 0 || key != 0) {
2599 SET_CFGROM_DIR_STACK_OFF(node);
2600 quadlet = IEEE1212_ROOT_DIR_QUAD;
2601 } else {
2602 node->cur_dir_start = quadlet;
2603 node->cur_dir_size = IEEE1212_DIR_LEN(data);
2604 node->expected_dir_quad = 0;
2605 /* get the next quad */
2606 quadlet++;
2607 }
2608 } else {
2609 /*
2610 * If we read all quads in cur dir and the cur dir is not
2611 * a leaf, scan for offsets (if the directory's CRC checks
2612 * out OK). If we have a directory or a leaf, we save the
2613 * current location on the stack and start reading that
2614 * directory. So, we will end up with a depth first read of
2615 * the entire config rom. If we are done with the current
2616 * directory, pop it off the stack and continue the scanning
2617 * as appropriate.
2618 */
2619 if (quadlet == node->cur_dir_start + node->cur_dir_size) {
2621 int i, top;
2622 boolean_t done_with_cur_dir = B_FALSE;
2624 if (node->expected_type == IEEE1212_LEAF_TYPE) {
2625 node->expected_type = IEEE1212_IMMEDIATE_TYPE;
2626 done_with_cur_dir = B_TRUE;
2627 goto donewithcurdir;
2628 }
2630 ptr = &node->cfgrom[node->cur_dir_start];
2631 CFGROM_TYPE_KEY_VALUE(*ptr, type, key, value);
2633 /*
2634 * If CRC for this directory is invalid, turn off
2635 * dir stack and start re-reading from root dir.
2636 * This wastes the work done thus far, but CRC
2637 * errors in directories should be rather rare.
2638 * if s1394_crcsz_is_cfgsz is set, then set
2639 * cfgrom_valid_size to the len specified as crc len
2640 * in quadlet 0.
2641 */
2642 if (s1394_valid_dir(hal, node, key, ptr) == B_FALSE) {
2643 SET_CFGROM_DIR_STACK_OFF(node);
2644 if (s1394_crcsz_is_cfgsz != 0) {
2645 SET_CFGROM_SIZE_IS_CRCSIZE(node);
2646 node->cfgrom_valid_size =
2647 ((node->cfgrom[0] >>
2648 IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
2649 IEEE1394_CFG_ROM_CRC_LEN_MASK);
2650 }
2651 *nextquadp = IEEE1212_ROOT_DIR_QUAD;
2652 return (0);
2653 }
2654 i = node->cur_dir_start + 1;
2655 rescan:
2656 for (done_with_cur_dir = B_FALSE; i <=
2657 node->cur_dir_start + node->cur_dir_size; i++) {
2658 data = node->cfgrom[i];
2659 CFGROM_TYPE_KEY_VALUE(data, type, key, value);
2660 /* read leaf type and directory types only */
2661 if (type == IEEE1212_LEAF_TYPE || type ==
2662 IEEE1212_DIRECTORY_TYPE) {
2664 /*
2665 * push current dir on stack; if the
2666 * stack is overflowing, ie, too many
2667 * directory level nestings, turn off
2668 * dir stack and fall back to serial
2669 * scanning, starting at root dir. This
2670 * wastes all the work we have done
2671 * thus far, but more than 16 levels
2672 * of directories is rather odd...
2673 */
2674 top = ++node->dir_stack_top;
2675 if (top == S1394_DIR_STACK_SIZE) {
2677 SET_CFGROM_DIR_STACK_OFF(node);
2678 *nextquadp =
2679 IEEE1212_ROOT_DIR_QUAD;
2680 return (0);
2681 }
2683 node->dir_stack[top].dir_start =
2684 node->cur_dir_start;
2685 node->dir_stack[top].dir_size =
2686 node->cur_dir_size;
2687 node->dir_stack[top].dir_next_quad =
2688 i + 1;
2689 /* and set the next quadlet to read */
2690 quadlet = i + value;
2691 node->expected_dir_quad = quadlet;
2692 node->expected_type = type;
2693 break;
2694 }
2695 }
2697 donewithcurdir:
2699 if ((i > node->cur_dir_start + node->cur_dir_size) ||
2700 done_with_cur_dir == B_TRUE) {
2702 /*
2703 * all done with cur dir; pop it off the stack
2704 */
2705 if (node->dir_stack_top >= 0) {
2706 top = node->dir_stack_top--;
2707 node->cur_dir_start =
2708 node->dir_stack[top].dir_start;
2709 node->cur_dir_size =
2710 node->dir_stack[top].dir_size;
2711 i = node->dir_stack[top].dir_next_quad;
2712 goto rescan;
2713 } else {
2714 /*
2715 * if empty stack, we are at the top
2716 * level; declare done.
2717 */
2718 return (1);
2719 }
2720 }
2721 } else {
2722 /* get the next quadlet */
2723 quadlet++;
2724 }
2725 }
2726 *nextquadp = quadlet;
2728 return (0);
2729 }