/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * 1394 Services Layer HAL Interface
 * Contains all of the routines that define the HAL to Services Layer
 */

#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sunndi.h>
#include <sys/cmn_err.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/devctl.h>
#include <sys/tnf_probe.h>

#include <sys/1394/t1394.h>
#include <sys/1394/s1394.h>
#include <sys/1394/h1394.h>
#include <sys/1394/ieee1394.h>
extern struct bus_ops nx1394_busops;
extern int nx1394_define_events(s1394_hal_t *hal);
extern void nx1394_undefine_events(s1394_hal_t *hal);
extern int s1394_ignore_invalid_gap_cnt;
/*
 * Function:    h1394_init()
 * Input(s):    modlp           The structure containing all of the
 *                                 HAL's relevant information
 *
 * Description: h1394_init() is called by the HAL's _init function and is
 *              used to set up the nexus bus ops.
 */
h1394_init(struct modlinkage *modlp)
{
    struct dev_ops *devops;

    devops = ((struct modldrv *)(modlp->ml_linkage[0]))->drv_dev_ops;
    devops->devo_bus_ops = &nx1394_busops;
}
/*
 * Function:    h1394_fini()
 * Input(s):    modlp           The structure containing all of the
 *                                 HAL's relevant information
 *
 * Description: h1394_fini() is called by the HAL's _fini function and is
 *              used to NULL out the nexus bus ops.
 */
h1394_fini(struct modlinkage *modlp)
{
    struct dev_ops *devops;

    devops = ((struct modldrv *)(modlp->ml_linkage[0]))->drv_dev_ops;
    devops->devo_bus_ops = NULL;
}
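/*
 * Usage sketch (illustrative only, not part of this file): a HAL driver
 * is expected to call h1394_init() from its _init(9E) entry point before
 * mod_install(), and h1394_fini() from _fini(9E) after a successful
 * mod_remove(), so the nexus bus ops are installed while the module is
 * loaded.  The modlinkage name below is hypothetical.
 *
 *	int
 *	_init(void)
 *	{
 *		(void) h1394_init(&example_hal_modlinkage);
 *		return (mod_install(&example_hal_modlinkage));
 *	}
 *
 *	int
 *	_fini(void)
 *	{
 *		int status;
 *
 *		status = mod_remove(&example_hal_modlinkage);
 *		if (status == 0)
 *			(void) h1394_fini(&example_hal_modlinkage);
 *		return (status);
 *	}
 */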
/*
 * Function:    h1394_attach()
 * Input(s):    halinfo         The structure containing all of the
 *                                 HAL's relevant information
 *              cmd             The ddi_attach_cmd_t that tells us
 *                                 if this is a RESUME or a regular
 *
 * Output(s):   sl_private      The HAL "handle" to be used for
 *                                 all subsequent calls into the
 *                                 1394 Software Framework
 *
 * Description: h1394_attach() registers the HAL with the 1394 Software
 *              Framework.  It returns a HAL "handle" to be used for
 *              all subsequent calls into the 1394 Software Framework.
 */
h1394_attach(h1394_halinfo_t *halinfo, ddi_attach_cmd_t cmd, void **sl_private)
{
    ASSERT(sl_private != NULL);
    /* If this is a DDI_RESUME, return success */
    if (cmd == DDI_RESUME) {
        hal = (s1394_hal_t *)(*sl_private);
        /* If we have a 1394A PHY, then reset the "contender bit" */
        if (hal->halinfo.phy == H1394_PHY_1394A)
            (void) HAL_CALL(hal).set_contender_bit(
                hal->halinfo.hal_private);
        return (DDI_SUCCESS);
    } else if (cmd != DDI_ATTACH) {
        return (DDI_FAILURE);
    }

    /* Allocate space for s1394_hal_t */
    hal = kmem_zalloc(sizeof (s1394_hal_t), KM_SLEEP);
    /* Setup HAL state */
    hal->hal_state = S1394_HAL_INIT;

    /* Copy in the halinfo struct */
    hal->halinfo = *halinfo;

    /* Create the topology tree mutex */
    mutex_init(&hal->topology_tree_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);

    /* Create the Cycle Master timer mutex */
    mutex_init(&hal->cm_timer_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);
    /* Initialize the Isoch CEC list */
    hal->isoch_cec_list_head = NULL;
    hal->isoch_cec_list_tail = NULL;
    mutex_init(&hal->isoch_cec_list_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);

    /* Initialize the Bus Manager node ID mutex and cv */
    mutex_init(&hal->bus_mgr_node_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);
    cv_init(&hal->bus_mgr_node_cv, NULL, CV_DRIVER,
        hal->halinfo.hw_interrupt);

    /* Initialize the Bus Manager node ID - "-1" means undetermined */
    hal->bus_mgr_node = -1;
    hal->incumbent_bus_mgr = B_FALSE;

    /* Initialize the Target list */
    hal->target_head = NULL;
    hal->target_tail = NULL;
    rw_init(&hal->target_list_rwlock, NULL, RW_DRIVER,
        hal->halinfo.hw_interrupt);

    /* Setup Request Q's */
    hal->outstanding_q_head = NULL;
    hal->outstanding_q_tail = NULL;
    mutex_init(&hal->outstanding_q_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);
    hal->pending_q_head = NULL;
    hal->pending_q_tail = NULL;
    mutex_init(&hal->pending_q_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);
    /* Create the kmem_cache for command allocations */
    (void) sprintf(buf, "hal%d_cache", ddi_get_instance(hal->halinfo.dip));
    cmd_size = sizeof (cmd1394_cmd_t) + sizeof (s1394_cmd_priv_t) +
        hal->halinfo.hal_overhead;

    hal->hal_kmem_cachep = kmem_cache_create(buf, cmd_size, 8, NULL, NULL,
        NULL, NULL, NULL, 0);

    /* Setup the event stuff */
    ret = nx1394_define_events(hal);
    if (ret != DDI_SUCCESS) {
        /* Clean up before leaving */
        s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL0);
        return (DDI_FAILURE);
    }
    /* Initialize the mutexes and cv's used by the bus reset thread */
    mutex_init(&hal->br_thread_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);
    cv_init(&hal->br_thread_cv, NULL, CV_DRIVER, hal->halinfo.hw_interrupt);
    mutex_init(&hal->br_cmplq_mutex, NULL, MUTEX_DRIVER,
        hal->halinfo.hw_interrupt);
    cv_init(&hal->br_cmplq_cv, NULL, CV_DRIVER, hal->halinfo.hw_interrupt);

    /*
     * Create a bus reset thread to handle the device discovery.
     * It should take the default stack sizes, it should run
     * the s1394_br_thread() routine at the start, passing the
     * HAL pointer as its argument.  The thread should be put
     * on processor p0, its state should be set to runnable,
     * but not yet on a processor, and its scheduling priority
     * should be the minimum level of any system class.
     */
    hal->br_thread = thread_create(NULL, 0, s1394_br_thread,
        hal, 0, &p0, TS_RUN, minclsyspri);

    /* Until we see a bus reset this HAL has no nodes */
    hal->number_of_nodes = 0;
    hal->num_bus_reset_till_fail = NUM_BR_FAIL;
    /* Initialize the SelfID Info */
    hal->current_buffer = 0;
    hal->selfid_buf0 = kmem_zalloc(S1394_SELFID_BUF_SIZE, KM_SLEEP);
    hal->selfid_buf1 = kmem_zalloc(S1394_SELFID_BUF_SIZE, KM_SLEEP);

    /* Initialize kstat structures */
    ret = s1394_kstat_init(hal);
    if (ret != DDI_SUCCESS) {
        /* Clean up before leaving */
        s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL3);
        return (DDI_FAILURE);
    }
    hal->hal_kstats->guid = hal->halinfo.guid;
    /* Setup the node tree pointers */
    hal->old_tree = &hal->last_valid_tree[0];
    hal->topology_tree = &hal->current_tree[0];

    /* Initialize the local Config ROM entry */
    ret = s1394_init_local_config_rom(hal);
    if (ret != DDI_SUCCESS) {
        /* Clean up before leaving */
        s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL4);
        return (DDI_FAILURE);
    }

    /* Initialize 1394 Address Space */
    ret = s1394_init_addr_space(hal);
    if (ret != DDI_SUCCESS) {
        /* Clean up before leaving */
        s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL5);
        return (DDI_FAILURE);
    }

    /* Initialize FCP subsystem */
    ret = s1394_fcp_hal_init(hal);
    if (ret != DDI_SUCCESS) {
        /* Clean up before leaving */
        s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL6);
        return (DDI_FAILURE);
    }
    /* Initialize the IRM node ID - "-1" means invalid, undetermined */

    /* If we have a 1394A PHY, then set the "contender bit" */
    if (hal->halinfo.phy == H1394_PHY_1394A)
        (void) HAL_CALL(hal).set_contender_bit(
            hal->halinfo.hal_private);

    /* Add into linked list */
    mutex_enter(&s1394_statep->hal_list_mutex);
    if ((s1394_statep->hal_head == NULL) &&
        (s1394_statep->hal_tail == NULL)) {
        s1394_statep->hal_head = hal;
        s1394_statep->hal_tail = hal;
    } else {
        s1394_statep->hal_tail->hal_next = hal;
        hal->hal_prev = s1394_statep->hal_tail;
        s1394_statep->hal_tail = hal;
    }
    mutex_exit(&s1394_statep->hal_list_mutex);

    /* Fill in services layer private info */
    *sl_private = (void *)hal;

    return (DDI_SUCCESS);
}
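/*
 * Usage sketch (illustrative only, not part of this file): a HAL's
 * attach(9E) routine fills in an h1394_halinfo_t and registers with the
 * framework, keeping the returned handle for all later h1394_*() calls.
 * Only fields referenced elsewhere in this file (dip, phy, hal_private,
 * hal_overhead, ...) are shown, and all variable names here are
 * hypothetical.
 *
 *	h1394_halinfo_t halinfo;
 *	void *sl_handle;
 *
 *	halinfo.dip = dip;
 *	halinfo.phy = H1394_PHY_1394A;
 *	halinfo.hal_private = (void *)example_softc;
 *	halinfo.hal_overhead = sizeof (example_hal_cmd_overhead_t);
 *
 *	if (h1394_attach(&halinfo, DDI_ATTACH, &sl_handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */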
/*
 * Function:    h1394_detach()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              cmd             The ddi_detach_cmd_t that tells us
 *                                 if this is a SUSPEND or a regular
 *
 * Output(s):   DDI_SUCCESS     HAL successfully detached
 *              DDI_FAILURE     HAL failed to detach
 *
 * Description: h1394_detach() unregisters the HAL from the 1394 Software
 *              Framework.  It can be called during a SUSPEND operation or
 *              for a real detach() event.
 */
h1394_detach(void **sl_private, ddi_detach_cmd_t cmd)
{
    hal = (s1394_hal_t *)(*sl_private);

    switch (cmd) {
    case DDI_DETACH:
        /* Clean up before leaving */
        s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL7);
        /* NULL out the HAL "handle" */
        *sl_private = NULL;
        break;

    case DDI_SUSPEND:
        /* Turn off any timers that might be set */
        s1394_destroy_timers(hal);
        /* Set the hal_was_suspended bit */
        hal->hal_was_suspended = B_TRUE;
        break;

    default:
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}
/*
 * Function:    h1394_alloc_cmd()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              flags           The flags parameter is described below
 *
 * Output(s):   cmdp            Pointer to the newly allocated command
 *              hal_priv_ptr    Offset into the command, points to
 *                                 the HAL's private area
 *
 * Description: h1394_alloc_cmd() allocates a command for use with the
 *              h1394_read_request(), h1394_write_request(), or
 *              h1394_lock_request() interfaces of the 1394 Software Framework.
 *              By default, h1394_alloc_cmd() may sleep while allocating
 *              memory for the command structure.  If this is undesirable,
 *              the HAL may set the H1394_ALLOC_CMD_NOSLEEP bit in the flags
 */
h1394_alloc_cmd(void *sl_private, uint_t flags, cmd1394_cmd_t **cmdp,
    h1394_cmd_priv_t **hal_priv_ptr)
{
    s1394_cmd_priv_t *s_priv;

    hal = (s1394_hal_t *)sl_private;

    if (s1394_alloc_cmd(hal, flags, cmdp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    /* Get the Services Layer private area */
    s_priv = S1394_GET_CMD_PRIV(*cmdp);

    *hal_priv_ptr = &s_priv->hal_cmd_private;

    return (DDI_SUCCESS);
}
/*
 * Function:    h1394_free_cmd()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              cmdp            Pointer to the command to be freed
 *
 * Output(s):   DDI_SUCCESS     HAL successfully freed command
 *              DDI_FAILURE     HAL failed to free command
 *
 * Description: h1394_free_cmd() attempts to free a command that has previously
 *              been allocated by the HAL.  It is possible for h1394_free_cmd()
 *              to fail because the command is currently in-use by the 1394
 *              Software Framework.
 */
h1394_free_cmd(void *sl_private, cmd1394_cmd_t **cmdp)
{
    s1394_cmd_priv_t *s_priv;

    hal = (s1394_hal_t *)sl_private;

    /* Get the Services Layer private area */
    s_priv = S1394_GET_CMD_PRIV(*cmdp);

    /* Check that command isn't in use */
    if (s_priv->cmd_in_use == B_TRUE) {
        ASSERT(s_priv->cmd_in_use == B_FALSE);
        return (DDI_FAILURE);
    }

    kmem_cache_free(hal->hal_kmem_cachep, *cmdp);

    /* Command pointer is set to NULL before returning */

    /* kstats - number of cmds freed */
    hal->hal_kstats->cmd_free++;

    return (DDI_SUCCESS);
}
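/*
 * Usage sketch (illustrative only, not part of this file): allocating
 * and later freeing a command from the HAL side.  H1394_ALLOC_CMD_NOSLEEP
 * is the flag described above; the variable names are hypothetical.
 *
 *	cmd1394_cmd_t *cmdp;
 *	h1394_cmd_priv_t *hal_priv;
 *
 *	if (h1394_alloc_cmd(sl_handle, H1394_ALLOC_CMD_NOSLEEP, &cmdp,
 *	    &hal_priv) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 *	(void) h1394_free_cmd(sl_handle, &cmdp);
 */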
/*
 * Function:    h1394_cmd_is_complete()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              command_id      Pointer to the command that has
 *              cmd_type        AT_RESP => AT response or ATREQ =
 *              status          Command's completion status
 *
 * Description: h1394_cmd_is_complete() is called by the HAL whenever an
 *              outstanding command has completed (successfully or otherwise).
 *              After determining whether it was an AT request or an AT
 *              response that we are handling, the command is dispatched to
 *              the appropriate handler in the 1394 Software Framework.
 */
h1394_cmd_is_complete(void *sl_private, cmd1394_cmd_t *command_id,
    uint32_t cmd_type, int status)
{
    hal = (s1394_hal_t *)sl_private;

    /* Is it AT_RESP or AT_REQ? */

        s1394_atreq_cmd_complete(hal, command_id, status);

        s1394_atresp_cmd_complete(hal, command_id, status);

        dip = hal->halinfo.dip;

        /* An unexpected error in the HAL */
        cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
            ddi_node_name(dip), ddi_get_instance(dip));

        /* Disable the HAL */
        s1394_hal_shutdown(hal, B_TRUE);
}
/*
 * Function:    h1394_bus_reset()
 * Input(s):    sl_private      The HAL "handle" returned by
 *
 * Output(s):   selfid_buf_addr The pointer to a buffer into which
 *                                 any Self ID packets should be put
 *
 * Description: h1394_bus_reset() is called whenever a 1394 bus reset event
 *              is detected by the HAL.  This routine simply prepares for
 *              the subsequent Self ID packets.
 */
h1394_bus_reset(void *sl_private, void **selfid_buf_addr)
{
    hal = (s1394_hal_t *)sl_private;

    mutex_enter(&hal->topology_tree_mutex);

    /* Update the HAL's state */
    if (hal->hal_state != S1394_HAL_SHUTDOWN) {
        hal->hal_state = S1394_HAL_RESET;
    } else {
        mutex_exit(&hal->topology_tree_mutex);
        return;
    }

    if (hal->initiated_bus_reset == B_TRUE) {
        hal->initiated_bus_reset = B_FALSE;
        if (hal->num_bus_reset_till_fail > 0) {
            hal->num_bus_reset_till_fail--;
        }
    } else {
        hal->num_bus_reset_till_fail = NUM_BR_FAIL;
    }

    /* Reset the IRM node ID */

    /* Slowest node defaults to IEEE1394_S400 */
    hal->slowest_node_speed = IEEE1394_S400;

    /* Pick a SelfID buffer to give */
    if (hal->current_buffer == 0) {
        *selfid_buf_addr = (void *)hal->selfid_buf1;
        hal->current_buffer = 1;
    } else {
        *selfid_buf_addr = (void *)hal->selfid_buf0;
        hal->current_buffer = 0;
    }

    /* Disable the CSR topology_map (temporarily) */
    s1394_CSR_topology_map_disable(hal);

    mutex_exit(&hal->topology_tree_mutex);

    /* Reset the Bus Manager node ID */
    mutex_enter(&hal->bus_mgr_node_mutex);
    hal->bus_mgr_node = -1;
    mutex_exit(&hal->bus_mgr_node_mutex);
}
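/*
 * Usage sketch (illustrative only, not part of this file): on a bus reset
 * interrupt the HAL asks the framework for one of the two SelfID buffers
 * and then copies the incoming SelfID packets into it.  Variable names
 * are hypothetical.
 *
 *	void *selfid_buf;
 *
 *	h1394_bus_reset(sl_handle, &selfid_buf);
 *	(The HAL then fills selfid_buf with the SelfID packets it receives.)
 */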
/*
 * Function:    h1394_self_ids()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              selfid_buf_addr Pointer to the Self ID buffer
 *              selfid_size     The size of the filled part of the
 *              node_id         The local (host) node ID for the
 *              generation_count The current generation number
 *
 * Description: h1394_self_ids() does a lot of the work at bus reset.  It
 *              takes the Self ID packets and parses them, builds a topology
 *              tree representation of them, calculates gap count, IRM, speed
 *              map, does any node matching that's possible, and then wakes
 *              up the bus reset processing thread.
 */
h1394_self_ids(void *sl_private, void *selfid_buf_addr, uint32_t selfid_size,
    uint32_t node_id, uint32_t generation_count)
{
    uint_t gen_diff, gen_rollover;
    boolean_t tree_copied = B_FALSE;
    ushort_t saved_number_of_nodes;

    /*
     * NOTE: current topology tree is referred to as topology_tree
     * and the old topology tree is referred to as old_tree.
     * tree_valid indicates selfID buffer checked out OK and we were
     * able to build the topology tree.
     * tree_processed indicates we read the config ROMs as needed.
     */
    hal = (s1394_hal_t *)sl_private;

    /* Lock the topology tree */
    mutex_enter(&hal->topology_tree_mutex);
    if (hal->hal_state == S1394_HAL_SHUTDOWN) {
        mutex_exit(&hal->topology_tree_mutex);
        return;
    }
    /* kstats - number of selfid completes */
    hal->hal_kstats->selfid_complete++;

    if (generation_count > hal->generation_count) {
        gen_diff = generation_count - hal->generation_count;
        hal->hal_kstats->bus_reset += gen_diff;
    } else {
        gen_diff = hal->generation_count - generation_count;
        /* Use max_generation to determine how many bus resets */
        hal->hal_kstats->bus_reset +=
            (hal->halinfo.max_generation - gen_diff);
    }

    /*
     * If the current tree has a valid topology tree (selfids
     * checked out OK etc) and config roms read as needed,
     * then make it the old tree before building a new one.
     */
    if ((hal->topology_tree_valid == B_TRUE) &&
        (hal->topology_tree_processed == B_TRUE)) {
        /* Trees are switched after the copy completes */
        s1394_copy_old_tree(hal);
        tree_copied = B_TRUE;
    }
    /* Set the new generation and node id */
    hal->node_id = node_id;
    hal->generation_count = generation_count;

    /* Invalidate the current topology tree */
    hal->topology_tree_valid = B_FALSE;
    hal->topology_tree_processed = B_FALSE;
    hal->cfgroms_being_read = 0;

    /*
     * Save the number of nodes prior to parsing the self id buffer.
     * We need this saved value while initializing the topology tree
     * (for non-copy case).
     */
    saved_number_of_nodes = hal->number_of_nodes;

    /* Parse the SelfID buffer */
    if (s1394_parse_selfid_buffer(hal, selfid_buf_addr, selfid_size) !=
        DDI_SUCCESS) {
        /* Unlock the topology tree */
        mutex_exit(&hal->topology_tree_mutex);

        /* kstats - SelfID buffer error */
        hal->hal_kstats->selfid_buffer_error++;
        return;	/* Error parsing SelfIDs */
    }
    /* Sort the SelfID packets by node number (if it's a 1995 PHY) */
    if (hal->halinfo.phy == H1394_PHY_1995) {
        s1394_sort_selfids(hal);
    }

    /*
     * Update the cycle master timer - if the timer is set and
     * we were the root but we are not anymore, then disable it.
     */
    mutex_enter(&hal->cm_timer_mutex);
    if ((hal->cm_timer_set == B_TRUE) &&
        ((hal->old_number_of_nodes - 1) ==
        IEEE1394_NODE_NUM(hal->old_node_id)) &&
        ((hal->number_of_nodes - 1) !=
        IEEE1394_NODE_NUM(hal->node_id))) {
        mutex_exit(&hal->cm_timer_mutex);
        (void) untimeout(hal->cm_timer);
    } else {
        mutex_exit(&hal->cm_timer_mutex);
    }

    s1394_init_topology_tree(hal, tree_copied, saved_number_of_nodes);
    /* Determine the 1394 bus gap count */
    hal->gap_count = s1394_get_current_gap_count(hal);
    /* If gap counts are inconsistent, reset */
    if (hal->gap_count == -1) {
        /* Unlock the topology tree */
        mutex_exit(&hal->topology_tree_mutex);

        /* kstats - SelfID buffer error (invalid gap counts) */
        hal->hal_kstats->selfid_buffer_error++;

        if (s1394_ignore_invalid_gap_cnt == 1) {
            /* Lock the topology tree again */
            mutex_enter(&hal->topology_tree_mutex);
            hal->gap_count = 0x3F;
        } else {
            return;	/* Invalid gap counts in SelfID buffer */
        }
    }

    /* Determine the Isoch Resource Manager */
    hal->IRM_node = s1394_get_isoch_rsrc_mgr(hal);
    /* Build the topology tree */
    if (s1394_topology_tree_build(hal) != DDI_SUCCESS) {
        /* Unlock the topology tree */
        mutex_exit(&hal->topology_tree_mutex);

        /* kstats - SelfID buffer error (Invalid topology tree) */
        hal->hal_kstats->selfid_buffer_error++;
        return;	/* Error building topology tree from SelfIDs */
    }

    /* Update the CSR topology_map */
    s1394_CSR_topology_map_update(hal);

    /* Calculate the diameter */
    diameter = s1394_topology_tree_calculate_diameter(hal);

    /* Determine the optimum gap count */
    hal->optimum_gap_count = s1394_gap_count_optimize(diameter);

    /* Fill in the speed map */
    s1394_speed_map_fill(hal);

    /* Initialize the two trees (for tree walking) */
    s1394_topology_tree_mark_all_unvisited(hal);
    s1394_old_tree_mark_all_unvisited(hal);
    s1394_old_tree_mark_all_unmatched(hal);
    /* Are both trees (old and new) valid? */
    if ((hal->old_tree_valid == B_TRUE) &&
        (hal->topology_tree_valid == B_TRUE)) {
        /* If HAL was in a suspended state, then do no matching */
        if (hal->hal_was_suspended == B_TRUE) {
            hal->hal_was_suspended = B_FALSE;
        } else {
            gen_rollover = hal->halinfo.max_generation + 1;
            /* If only one bus reset occurred, match the trees */
            if (((hal->old_generation_count + 1) % gen_rollover) ==
                generation_count) {
                s1394_match_tree_nodes(hal);
            }
        }
    }

    /* Unlock the topology tree */
    mutex_exit(&hal->topology_tree_mutex);

    /* Wake up the bus reset processing thread */
    s1394_tickle_bus_reset_thread(hal);
}
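/*
 * Usage sketch (illustrative only, not part of this file): once the
 * SelfID phase completes, the HAL reports the filled buffer together
 * with its own node ID and the new generation.  Variable names are
 * hypothetical.
 *
 *	h1394_self_ids(sl_handle, selfid_buf, selfid_size,
 *	    local_node_id, generation_count);
 */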
/*
 * Function:    h1394_read_request()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              req             The incoming AR request
 *
 * Description: h1394_read_request() receives incoming AR requests.  These
 *              asynchronous read requests are dispatched to the appropriate
 *              target (if one has registered) or are handled by the 1394
 *              Software Framework, which will send out an appropriate
 */
h1394_read_request(void *sl_private, cmd1394_cmd_t *req)
{
    s1394_cmd_priv_t *s_priv;
    s1394_addr_space_blk_t *addr_blk;
    uint64_t end_of_request;
    void (*recv_read_req)(cmd1394_cmd_t *);

    hal = (s1394_hal_t *)sl_private;

    /* Get the Services Layer private area */
    s_priv = S1394_GET_CMD_PRIV(req);

    s_priv->cmd_priv_xfer_type = S1394_CMD_READ;

    switch (req->cmd_type) {
    case CMD1394_ASYNCH_RD_QUAD:
        cmd_length = IEEE1394_QUADLET;
        hal->hal_kstats->arreq_quad_rd++;
        break;

    case CMD1394_ASYNCH_RD_BLOCK:
        cmd_length = req->cmd_u.b.blk_length;
        hal->hal_kstats->arreq_blk_rd++;
        break;

    default:
        dip = hal->halinfo.dip;

        /* An unexpected error in the HAL */
        cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
            ddi_node_name(dip), ddi_get_instance(dip));

        /* Disable the HAL */
        s1394_hal_shutdown(hal, B_TRUE);
        return;
    }

    /* Lock the "used" tree */
    mutex_enter(&hal->addr_space_used_mutex);
    /* Has the 1394 address been allocated? */
    addr_blk = s1394_used_tree_search(hal, req->cmd_addr);

    /* If it wasn't found, it isn't owned... */
    if (addr_blk == NULL) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }

    /* Does the WHOLE request fit in the allocated block? */
    end_of_request = (req->cmd_addr + cmd_length) - 1;
    if (end_of_request > addr_blk->addr_hi) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }

    /* Is a read request valid for this address space? */
    if (!(addr_blk->addr_enable & T1394_ADDR_RDENBL)) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }

    /* Make sure quadlet requests are quadlet-aligned */
    offset = req->cmd_addr - addr_blk->addr_lo;
    if ((req->cmd_type == CMD1394_ASYNCH_RD_QUAD) &&
        ((offset & 0x3) != 0)) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }
    /* Fill in the backing store if necessary */
    if (addr_blk->kmem_bufp != NULL) {
        offset = req->cmd_addr - addr_blk->addr_lo;
        bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;

        switch (req->cmd_type) {
        case CMD1394_ASYNCH_RD_QUAD:
            bcopy((void *)bufp_addr,
                (void *)&(req->cmd_u.q.quadlet_data), cmd_length);
            break;

        case CMD1394_ASYNCH_RD_BLOCK:
            begin_ptr = req->cmd_u.b.data_block->b_wptr;
            end_ptr = begin_ptr + cmd_length;
            tmp_ptr = req->cmd_u.b.data_block->b_datap->db_lim;
            if (end_ptr <= tmp_ptr) {
                bcopy((void *)bufp_addr, (void *)begin_ptr,
                    cmd_length);
                /* Update b_wptr to reflect the new data */
                req->cmd_u.b.data_block->b_wptr = end_ptr;
            } else {
                dip = hal->halinfo.dip;

                /* An unexpected error in the HAL */
                cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
                    ddi_node_name(dip), ddi_get_instance(dip));

                /* Unlock the "used" tree */
                mutex_exit(&hal->addr_space_used_mutex);

                /* Disable the HAL */
                s1394_hal_shutdown(hal, B_TRUE);
                return;
            }
            break;

        default:
            dip = hal->halinfo.dip;

            /* An unexpected error in the HAL */
            cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
                ddi_node_name(dip), ddi_get_instance(dip));

            /* Unlock the "used" tree */
            mutex_exit(&hal->addr_space_used_mutex);

            /* Disable the HAL */
            s1394_hal_shutdown(hal, B_TRUE);
            return;
        }
    }
    /* Fill in the rest of the info in the request */
    s_priv->arreq_valid_addr = B_TRUE;
    req->cmd_callback_arg = addr_blk->addr_arg;
    recv_read_req = addr_blk->addr_events.recv_read_request;

    /* Unlock the "used" tree */
    mutex_exit(&hal->addr_space_used_mutex);

    /*
     * Add no code that modifies the command after the target
     * callback is called or after the response is sent to the
     */
    if (recv_read_req != NULL) {
        recv_read_req(req);
    } else {
        req->cmd_result = IEEE1394_RESP_COMPLETE;
        (void) s1394_send_response(hal, req);
    }
}
/*
 * Function:    h1394_write_request()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              req             The incoming AR request
 *
 * Description: h1394_write_request() receives incoming AR requests.  These
 *              asynchronous write requests are dispatched to the appropriate
 *              target (if one has registered) or are handled by the 1394
 *              Software Framework, which will send out an appropriate
 */
h1394_write_request(void *sl_private, cmd1394_cmd_t *req)
{
    s1394_cmd_priv_t *s_priv;
    h1394_cmd_priv_t *h_priv;
    s1394_addr_space_blk_t *addr_blk;
    uint64_t end_of_request;
    boolean_t posted_write = B_FALSE;
    boolean_t write_error = B_FALSE;
    void (*recv_write_req)(cmd1394_cmd_t *);

    hal = (s1394_hal_t *)sl_private;

    /* Get the Services Layer private area */
    s_priv = S1394_GET_CMD_PRIV(req);

    s_priv->cmd_priv_xfer_type = S1394_CMD_WRITE;

    switch (req->cmd_type) {
    case CMD1394_ASYNCH_WR_QUAD:
        cmd_length = IEEE1394_QUADLET;
        hal->hal_kstats->arreq_quad_wr++;
        break;

    case CMD1394_ASYNCH_WR_BLOCK:
        cmd_length = req->cmd_u.b.blk_length;
        hal->hal_kstats->arreq_blk_wr++;
        hal->hal_kstats->arreq_blk_wr_size += cmd_length;
        break;

    default:
        dip = hal->halinfo.dip;

        /* An unexpected error in the HAL */
        cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
            ddi_node_name(dip), ddi_get_instance(dip));

        /* Disable the HAL */
        s1394_hal_shutdown(hal, B_TRUE);
        return;
    }

    /* Lock the "used" tree */
    mutex_enter(&hal->addr_space_used_mutex);
    /* Has the 1394 address been allocated? */
    addr_blk = s1394_used_tree_search(hal, req->cmd_addr);

    /* Is this a posted write request? */
    posted_write = s1394_is_posted_write(hal, req->cmd_addr);

    /* If it wasn't found, it isn't owned... */
    if (addr_blk == NULL) {
        req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
        write_error = B_TRUE;
        goto write_error_check;
    }

    /* Does the WHOLE request fit in the allocated block? */
    end_of_request = (req->cmd_addr + cmd_length) - 1;
    if (end_of_request > addr_blk->addr_hi) {
        req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
        write_error = B_TRUE;
        goto write_error_check;
    }

    /* Is a write request valid for this address space? */
    if (!(addr_blk->addr_enable & T1394_ADDR_WRENBL)) {
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
        write_error = B_TRUE;
        goto write_error_check;
    }

    /* Make sure quadlet request is quadlet aligned */
    offset = req->cmd_addr - addr_blk->addr_lo;
    if ((req->cmd_type == CMD1394_ASYNCH_WR_QUAD) &&
        ((offset & 0x3) != 0)) {
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
        write_error = B_TRUE;
        goto write_error_check;
    }

write_error_check:
    /* Check if posted-write when sending error responses */
    if (write_error == B_TRUE) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);

        if (posted_write == B_TRUE) {
            /* Get a pointer to the HAL private struct */
            h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
            hal->hal_kstats->arreq_posted_write_error++;
            /* Free the command - Pass it back to the HAL */
            HAL_CALL(hal).response_complete(
                hal->halinfo.hal_private, req, h_priv);
            return;
        } else {
            (void) s1394_send_response(hal, req);
            return;
        }
    }
    /* Fill in the backing store if necessary */
    if (addr_blk->kmem_bufp != NULL) {
        offset = req->cmd_addr - addr_blk->addr_lo;
        bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
        switch (req->cmd_type) {
        case CMD1394_ASYNCH_WR_QUAD:
            bcopy((void *)&(req->cmd_u.q.quadlet_data),
                (void *)bufp_addr, cmd_length);
            break;

        case CMD1394_ASYNCH_WR_BLOCK:
            begin_ptr = req->cmd_u.b.data_block->b_rptr;
            end_ptr = begin_ptr + cmd_length;
            tmp_ptr = req->cmd_u.b.data_block->b_wptr;
            if (end_ptr <= tmp_ptr) {
                bcopy((void *)begin_ptr, (void *)bufp_addr,
                    cmd_length);
            } else {
                dip = hal->halinfo.dip;

                /* An unexpected error in the HAL */
                cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
                    ddi_node_name(dip), ddi_get_instance(dip));

                /* Unlock the "used" tree */
                mutex_exit(&hal->addr_space_used_mutex);

                /* Disable the HAL */
                s1394_hal_shutdown(hal, B_TRUE);
                return;
            }
            break;

        default:
            dip = hal->halinfo.dip;

            /* An unexpected error in the HAL */
            cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
                ddi_node_name(dip), ddi_get_instance(dip));

            /* Unlock the "used" tree */
            mutex_exit(&hal->addr_space_used_mutex);

            /* Disable the HAL */
            s1394_hal_shutdown(hal, B_TRUE);
            return;
        }
    }
    /* Fill in the rest of the info in the request */
    if (addr_blk->addr_type == T1394_ADDR_POSTED_WRITE)
        s_priv->posted_write = B_TRUE;

    s_priv->arreq_valid_addr = B_TRUE;
    req->cmd_callback_arg = addr_blk->addr_arg;
    recv_write_req = addr_blk->addr_events.recv_write_request;

    /* Unlock the "used" tree */
    mutex_exit(&hal->addr_space_used_mutex);

    /*
     * Add no code that modifies the command after the target
     * callback is called or after the response is sent to the
     */
    if (recv_write_req != NULL) {
        recv_write_req(req);
    } else {
        req->cmd_result = IEEE1394_RESP_COMPLETE;
        (void) s1394_send_response(hal, req);
    }
}
/*
 * Function:    h1394_lock_request()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              req             The incoming AR request
 *
 * Description: h1394_lock_request() receives incoming AR requests.  These
 *              asynchronous lock requests are dispatched to the appropriate
 *              target (if one has registered) or are handled by the 1394
 *              Software Framework, which will send out an appropriate
 */
h1394_lock_request(void *sl_private, cmd1394_cmd_t *req)
{
    s1394_cmd_priv_t *s_priv;
    s1394_addr_space_blk_t *addr_blk;
    uint64_t end_of_request;
    cmd1394_lock_type_t lock_type;
    void (*recv_lock_req)(cmd1394_cmd_t *);

    hal = (s1394_hal_t *)sl_private;

    /* Get the Services Layer private area */
    s_priv = S1394_GET_CMD_PRIV(req);

    s_priv->cmd_priv_xfer_type = S1394_CMD_LOCK;

    /* Lock the "used" tree */
    mutex_enter(&hal->addr_space_used_mutex);

    /* Has the 1394 address been allocated? */
    addr_blk = s1394_used_tree_search(hal, req->cmd_addr);

    /* If it wasn't found, it isn't owned... */
    if (addr_blk == NULL) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }
    /* Does the WHOLE request fit in the allocated block? */
    switch (req->cmd_type) {
    case CMD1394_ASYNCH_LOCK_32:
        end_of_request = (req->cmd_addr + IEEE1394_QUADLET) - 1;
        /* kstats - 32-bit lock request */
        hal->hal_kstats->arreq_lock32++;
        break;

    case CMD1394_ASYNCH_LOCK_64:
        end_of_request = (req->cmd_addr + IEEE1394_OCTLET) - 1;
        /* kstats - 64-bit lock request */
        hal->hal_kstats->arreq_lock64++;
        break;

    default:
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);

        dip = hal->halinfo.dip;

        /* An unexpected error in the HAL */
        cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
            ddi_node_name(dip), ddi_get_instance(dip));

        /* Disable the HAL */
        s1394_hal_shutdown(hal, B_TRUE);
        return;
    }

    if (end_of_request > addr_blk->addr_hi) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }

    /* Is a lock request valid for this address space? */
    if (!(addr_blk->addr_enable & T1394_ADDR_LKENBL)) {
        /* Unlock the "used" tree */
        mutex_exit(&hal->addr_space_used_mutex);
        req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
        (void) s1394_send_response(hal, req);
        return;
    }
    /* Fill in the backing store if necessary */
    if (addr_blk->kmem_bufp != NULL) {
        offset = req->cmd_addr - addr_blk->addr_lo;
        bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;

        if (req->cmd_type == CMD1394_ASYNCH_LOCK_32) {
            uint32_t old_value;
            uint32_t new_value;
            uint32_t arg_value;
            uint32_t data_value;

            arg_value = req->cmd_u.l32.arg_value;
            data_value = req->cmd_u.l32.data_value;
            lock_type = req->cmd_u.l32.lock_type;
            bcopy((void *)bufp_addr, (void *)&old_value,
                IEEE1394_QUADLET);

            switch (lock_type) {
            case CMD1394_LOCK_MASK_SWAP:
                /* Mask-Swap (see P1394A - Table 1.7) */
                new_value = (data_value & arg_value) |
                    (old_value & ~arg_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_QUADLET);
                req->cmd_u.l32.old_value = old_value;
                break;

            case CMD1394_LOCK_COMPARE_SWAP:
                if (old_value == arg_value) {
                    new_value = data_value;
                    /* Copy new_value into backing store */
                    bcopy((void *)&new_value,
                        (void *)bufp_addr, IEEE1394_QUADLET);
                }
                req->cmd_u.l32.old_value = old_value;
                break;

            case CMD1394_LOCK_FETCH_ADD:
                /* Fetch-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA32(old_value);
                new_value = old_value + data_value;
                new_value = T1394_DATA32(new_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_QUADLET);
                req->cmd_u.l32.old_value = old_value;
                break;

            case CMD1394_LOCK_LITTLE_ADD:
                /* Little-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA32(old_value);
                new_value = old_value + data_value;
                new_value = T1394_DATA32(new_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_QUADLET);
                req->cmd_u.l32.old_value = old_value;
                break;

            case CMD1394_LOCK_BOUNDED_ADD:
                /* Bounded-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA32(old_value);
                if (old_value != arg_value) {
                    new_value = old_value + data_value;
                    new_value = T1394_DATA32(new_value);
                    /* Copy new_value into backing store */
                    bcopy((void *)&new_value,
                        (void *)bufp_addr, IEEE1394_QUADLET);
                }
                req->cmd_u.l32.old_value = old_value;
                break;

            case CMD1394_LOCK_WRAP_ADD:
                /* Wrap-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA32(old_value);
                if (old_value != arg_value) {
                    new_value = old_value + data_value;
                } else {
                    new_value = data_value;
                }
                new_value = T1394_DATA32(new_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_QUADLET);
                req->cmd_u.l32.old_value = old_value;
                break;

            default:
                /* Unlock the "used" tree */
                mutex_exit(&hal->addr_space_used_mutex);
                req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
                (void) s1394_send_response(hal, req);
                return;
            }
        } else {
            /* Handling for the 8-byte (64-bit) lock requests */
            uint64_t old_value;
            uint64_t new_value;
            uint64_t arg_value;
            uint64_t data_value;

            arg_value = req->cmd_u.l64.arg_value;
            data_value = req->cmd_u.l64.data_value;
            lock_type = req->cmd_u.l64.lock_type;
            bcopy((void *)bufp_addr, (void *)&old_value,
                IEEE1394_OCTLET);

            switch (lock_type) {
            case CMD1394_LOCK_MASK_SWAP:
                /* Mask-Swap (see P1394A - Table 1.7) */
                new_value = (data_value & arg_value) |
                    (old_value & ~arg_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_OCTLET);
                req->cmd_u.l64.old_value = old_value;
                break;

            case CMD1394_LOCK_COMPARE_SWAP:
                if (old_value == arg_value) {
                    new_value = data_value;
                    /* Copy new_value into backing store */
                    bcopy((void *)&new_value,
                        (void *)bufp_addr, IEEE1394_OCTLET);
                }
                req->cmd_u.l64.old_value = old_value;
                break;

            case CMD1394_LOCK_FETCH_ADD:
                /* Fetch-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA64(old_value);
                new_value = old_value + data_value;
                new_value = T1394_DATA64(new_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_OCTLET);
                req->cmd_u.l64.old_value = old_value;
                break;

            case CMD1394_LOCK_LITTLE_ADD:
                /* Little-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA64(old_value);
                new_value = old_value + data_value;
                new_value = T1394_DATA64(new_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_OCTLET);
                req->cmd_u.l64.old_value = old_value;
                break;

            case CMD1394_LOCK_BOUNDED_ADD:
                /* Bounded-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA64(old_value);
                if (old_value != arg_value) {
                    new_value = old_value + data_value;
                    new_value = T1394_DATA64(new_value);
                    /* Copy new_value into backing store */
                    bcopy((void *)&new_value,
                        (void *)bufp_addr, IEEE1394_OCTLET);
                }
                req->cmd_u.l64.old_value = old_value;
                break;

            case CMD1394_LOCK_WRAP_ADD:
                /* Wrap-Add (see P1394A - Table 1.7) */
                old_value = T1394_DATA64(old_value);
                if (old_value != arg_value) {
                    new_value = old_value + data_value;
                } else {
                    new_value = data_value;
                }
                new_value = T1394_DATA64(new_value);
                /* Copy new_value into backing store */
                bcopy((void *)&new_value, (void *)bufp_addr,
                    IEEE1394_OCTLET);
                req->cmd_u.l64.old_value = old_value;
                break;

            default:
                /* Unlock the "used" tree */
                mutex_exit(&hal->addr_space_used_mutex);
                req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
                (void) s1394_send_response(hal, req);
                return;
            }
        }
    }
    /* Fill in the rest of the info in the request */
    s_priv->arreq_valid_addr = B_TRUE;
    req->cmd_callback_arg = addr_blk->addr_arg;
    recv_lock_req = addr_blk->addr_events.recv_lock_request;

    /* Unlock the "used" tree */
    mutex_exit(&hal->addr_space_used_mutex);

    /*
     * Add no code that modifies the command after the target
     * callback is called or after the response is sent to the
     */
    if (recv_lock_req != NULL) {
        recv_lock_req(req);
    } else {
        req->cmd_result = IEEE1394_RESP_COMPLETE;
        (void) s1394_send_response(hal, req);
    }
}
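/*
 * Worked example of the lock arithmetic above (32-bit mask-swap, values
 * chosen purely for illustration): with old_value 0x12345678, arg_value
 * 0x0000FFFF and data_value 0x0000ABCD,
 *
 *	new_value = (data_value & arg_value) | (old_value & ~arg_value)
 *	          = 0x0000ABCD | 0x12340000
 *	          = 0x1234ABCD
 *
 * and the requester gets the prior contents (0x12345678) back in
 * cmd_u.l32.old_value.
 */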
/*
 * Function:    h1394_ioctl()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              arg             argument for the ioctl cmd
 *              mode            mode bits (see ioctl(9e))
 *              cred_p          cred structure pointer
 *              rval_p          pointer to return value (see ioctl(9e))
 *
 * Output(s):   EINVAL if not a DEVCTL ioctl, else return value from s1394_ioctl
 *
 * Description: h1394_ioctl() implements non-HAL specific ioctls.  Currently,
 *              DEVCTL ioctls are the only generic ioctls supported.
 */
h1394_ioctl(void *sl_private, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
    if ((cmd & DEVCTL_IOC) != DEVCTL_IOC)
        return (EINVAL);

    status = s1394_ioctl((s1394_hal_t *)sl_private, cmd, arg, mode,
        cred_p, rval_p);

    return (status);
}
/*
 * Function:    h1394_phy_packet()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              packet_data     Pointer to a buffer of packet data
 *              quadlet_count   Length of the buffer
 *              timestamp       Timestamp indicating time of arrival
 *
 * Description: h1394_phy_packet() is not implemented currently, but would
 *              be used to process the responses to PHY ping packets in P1394A
 *              When one is sent out, a timestamp is given indicating its time
 *              of departure.  Comparing that old timestamp with this new
 *              timestamp, we can determine the time of flight and can use
 *              those times to optimize the gap count.
 */
h1394_phy_packet(void *sl_private, uint32_t *packet_data, uint_t quadlet_count,

    /* This interface is not yet implemented */
/*
 * Function:    h1394_error_detected()
 * Input(s):    sl_private      The HAL "handle" returned by
 *              type            The type of error the HAL detected
 *              arg             Pointer to any extra information
 *
 * Description: h1394_error_detected() is used by the HAL to report errors
 *              to the 1394 Software Framework.
 */
h1394_error_detected(void *sl_private, h1394_error_t type, void *arg)
{
    uint_t hal_node_num;
    uint_t IRM_node_num;

    hal = (s1394_hal_t *)sl_private;

    switch (type) {
    case H1394_LOCK_RESP_ERR:
        /* If we are the IRM, then initiate a bus reset */
        mutex_enter(&hal->topology_tree_mutex);
        hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
        IRM_node_num = hal->IRM_node;
        mutex_exit(&hal->topology_tree_mutex);
        if (IRM_node_num == hal_node_num)
            s1394_initiate_hal_reset(hal, NON_CRITICAL);

    case H1394_POSTED_WR_ERR:

    case H1394_SELF_INITIATED_SHUTDOWN:
        s1394_hal_shutdown(hal, B_FALSE);

    case H1394_CYCLE_TOO_LONG:
        /* Set a timer to become cycle master after 1 second */
        mutex_enter(&hal->cm_timer_mutex);
        hal->cm_timer_set = B_TRUE;
        mutex_exit(&hal->cm_timer_mutex);
        hal->cm_timer = timeout(s1394_cycle_too_long_callback, hal,
            drv_usectohz(CYCLE_MASTER_TIMER * 1000));