/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/modctl.h>
#include <sys/contract/device_impl.h>
#include <sys/promif.h>
#include <sys/cpuvar.h>
#include <sys/pathname.h>
#include <sys/taskq.h>
#include <sys/sysevent.h>
#include <sys/sunmdi.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/fs/snode.h>
#include <sys/fs/dv_node.h>
#include <sys/reboot.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/fs/sdev_impl.h>
#include <sys/sunldi.h>
#include <sys/sunldi_impl.h>
#include <sys/bootprops.h>
#include <sys/varargs.h>
#include <sys/modhash.h>
#include <sys/instance.h>
#if defined(__amd64) && !defined(__xpv)
#include <sys/iommulib.h>
#endif
int ddidebug = DDI_AUDIT;
#define	MT_CONFIG_OP	0
#define	MT_UNCONFIG_OP	1
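/*
 * Note: MT_CONFIG_OP and MT_UNCONFIG_OP select whether the multi-threaded
 * walkers below (mt_config_children()/mt_config_driver()) configure or
 * unconfigure the nodes they visit; the chosen operation is recorded in the
 * mtc_op field of struct mt_config_handle.
 */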
/* Multi-threaded configuration */
struct mt_config_handle {
	dev_info_t	*mtc_pdip;	/* parent dip for mt_config_children */
	dev_info_t	**mtc_fdip;	/* "a" dip where unconfigure failed */
	major_t		mtc_parmajor;	/* parent major for mt_config_driver */
	int		mtc_op;		/* config or unconfig */
	int		mtc_error;	/* operation error */
	struct brevq_node **mtc_brevqp;	/* outstanding branch events queue */
	timestruc_t	start_time;
};
struct devi_nodeid {
	pnode_t nodeid;
	dev_info_t *dip;
	struct devi_nodeid *next;
};
struct devi_nodeid_list {
	kmutex_t dno_lock;			/* Protects other fields */
	struct devi_nodeid	*dno_head;	/* list of devi nodeid elements */
	struct devi_nodeid	*dno_free;	/* Free list */
	uint_t dno_list_length;			/* number of dips in list */
};
/* used to keep track of branch remove events to be generated */
struct brevq_node {
	struct brevq_node *brn_sibling;
	struct brevq_node *brn_child;
};
static struct devi_nodeid_list devi_nodeid_list;
static struct devi_nodeid_list *devimap = &devi_nodeid_list;
/*
 * Well known nodes which are attached first at boot time.
 */
dev_info_t *top_devinfo;		/* root of device tree */
dev_info_t *options_dip;
dev_info_t *pseudo_dip;
dev_info_t *clone_dip;
dev_info_t *scsi_vhci_dip;		/* MPXIO dip */
/*
 * A non-global zone's /dev is derived from the device tree.
 * This generation number serves to indicate when a zone's
 * /dev may need to be updated.
 */
volatile ulong_t devtree_gen;		/* generation number */
/* block all future dev_info state changes */
hrtime_t	volatile devinfo_freeze = 0;

/* number of dev_info attaches/detaches currently in progress */
static ulong_t devinfo_attach_detach = 0;

extern int	sys_shutdown;
extern kmutex_t global_vhci_lock;

/* bitset of DS_SYSAVAIL & DS_RECONFIG - no races, no lock */
static int devname_state = 0;
/*
 * The devinfo snapshot cache and related variables.
 * The only field in the di_cache structure that needs initialization
 * is the mutex (cache_lock). However, since this is an adaptive mutex
 * (MUTEX_DEFAULT) - it is automatically initialized by being allocated
 * in zeroed memory (static storage class). Therefore no explicit
 * initialization of the di_cache structure is needed.
 */
struct di_cache	di_cache = {1};
int		di_cache_debug = 0;
/* For ddvis, which needs pseudo children under PCI */
int pci_allow_pseudo_children = 0;

/* Allow path-oriented alias driver binding on driver.conf enumerated nodes */
int driver_conf_allow_path_alias = 1;
/*
 * The following switch is for service people, in case a
 * 3rd party driver depends on identify(9e) being called.
 */
int identify_9e = 0;

/*
 * Flag controlling whether attach is prevented for retired
 * persistent nodes.
 */
int retire_prevents_attach = 1;
int mtc_off;					/* turn off mt config */

int quiesce_debug = 0;

boolean_t ddi_aliases_present = B_FALSE;
ddi_alias_t ddi_aliases;
uint_t tsd_ddi_redirect;

#define	DDI_ALIAS_HASH_SIZE	(2700)
static kmem_cache_t *ddi_node_cache;		/* devinfo node cache */
static devinfo_log_header_t *devinfo_audit_log;	/* devinfo log */
static int devinfo_log_size;			/* size in pages */

boolean_t ddi_err_panic = B_FALSE;
static int lookup_compatible(dev_info_t *, uint_t);
static char *encode_composite_string(char **, uint_t, size_t *, uint_t);
static void link_to_driver_list(dev_info_t *);
static void unlink_from_driver_list(dev_info_t *);
static void add_to_dn_list(struct devnames *, dev_info_t *);
static void remove_from_dn_list(struct devnames *, dev_info_t *);
static dev_info_t *find_duplicate_child();
static void add_global_props(dev_info_t *);
static void remove_global_props(dev_info_t *);
static int uninit_node(dev_info_t *);
static void da_log_init(void);
static void da_log_enter(dev_info_t *);
static int walk_devs(dev_info_t *, int (*f)(dev_info_t *, void *), void *, int);
static int reset_nexus_flags(dev_info_t *, void *);
static void ddi_optimize_dtree(dev_info_t *);
static int is_leaf_node(dev_info_t *);
static struct mt_config_handle *mt_config_init(dev_info_t *, dev_info_t **,
    int, major_t, int, struct brevq_node **);
static void mt_config_children(struct mt_config_handle *);
static void mt_config_driver(struct mt_config_handle *);
static int mt_config_fini(struct mt_config_handle *);
static int devi_unconfig_common(dev_info_t *, dev_info_t **, int, major_t,
    struct brevq_node **);
static int ndi_devi_config_obp_args(dev_info_t *parent, char *devnm,
    dev_info_t **childp, int flags);
static void i_link_vhci_node(dev_info_t *);
static void ndi_devi_exit_and_wait(dev_info_t *dip,
    int circular, clock_t end_time);
static int ndi_devi_unbind_driver(dev_info_t *dip);

static int i_ddi_check_retire(dev_info_t *dip);

static void quiesce_one_device(dev_info_t *, void *);

dev_info_t *ddi_alias_redirect(char *alias);
char *ddi_curr_redirect(char *currpath);
/*
 * dev_info cache and node management
 */

/* initialize dev_info node cache */
void
i_ddi_node_cache_init()
{
	ASSERT(ddi_node_cache == NULL);
	ddi_node_cache = kmem_cache_create("dev_info_node_cache",
	    sizeof (struct dev_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	if (ddidebug & DDI_AUDIT)
		da_log_init();
}
/*
 * Allocating a dev_info node, callable from interrupt context with KM_NOSLEEP
 * The allocated node has a reference count of 0.
 */
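/*
 * Within this file, i_ddi_alloc_node() is reached through ndi_devi_alloc()
 * (KM_NOSLEEP, usable from interrupt context) and ndi_devi_alloc_sleep()
 * (KM_SLEEP); both pass an instance of -1 so the framework assigns the
 * instance number later.
 */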
dev_info_t *
i_ddi_alloc_node(dev_info_t *pdip, char *node_name, pnode_t nodeid,
    int instance, ddi_prop_t *sys_prop, int flag)
{
	struct dev_info *devi;
	struct devi_nodeid *elem;
	static char failed[] = "i_ddi_alloc_node: out of memory";

	ASSERT(node_name != NULL);

	if ((devi = kmem_cache_alloc(ddi_node_cache, flag)) == NULL) {
		cmn_err(CE_NOTE, failed);
		return (NULL);
	}

	bzero(devi, sizeof (struct dev_info));

	if (devinfo_audit_log) {
		devi->devi_audit = kmem_zalloc(sizeof (devinfo_audit_t), flag);
		if (devi->devi_audit == NULL)
			goto fail;
	}

	if ((devi->devi_node_name = i_ddi_strdup(node_name, flag)) == NULL)
		goto fail;

	/* default binding name is node name */
	devi->devi_binding_name = devi->devi_node_name;
	devi->devi_major = DDI_MAJOR_T_NONE;	/* unbound by default */

	/*
	 * Make a copy of system property
	 */
	if (sys_prop &&
	    (devi->devi_sys_prop_ptr = i_ddi_prop_list_dup(sys_prop, flag))
	    == NULL)
		goto fail;

	/*
	 * Assign devi_nodeid, devi_node_class, devi_node_attributes
	 * according to the following algorithm:
	 *
	 * nodeid arg			node class		node attributes
	 *
	 * DEVI_PSEUDO_NODEID		DDI_NC_PSEUDO		A
	 * DEVI_SID_NODEID		DDI_NC_PSEUDO		A,P
	 * DEVI_SID_HIDDEN_NODEID	DDI_NC_PSEUDO		A,P,H
	 * DEVI_SID_HP_NODEID		DDI_NC_PSEUDO		A,P,h
	 * DEVI_SID_HP_HIDDEN_NODEID	DDI_NC_PSEUDO		A,P,H,h
	 * other			DDI_NC_PROM		P
	 *
	 * Where A = DDI_AUTO_ASSIGNED_NODEID (auto-assign a nodeid)
	 * and	P = DDI_PERSISTENT
	 * and	H = DDI_HIDDEN_NODE
	 * and	h = DDI_HOTPLUG_NODE
	 *
	 * auto-assigned nodeids are also auto-freed.
	 */
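	/*
	 * For example, a self-identifying node allocated with DEVI_SID_NODEID
	 * ends up as a DDI_NC_PSEUDO node marked DDI_PERSISTENT, with a
	 * nodeid that is auto-assigned below and auto-freed again in
	 * i_ddi_free_node().
	 */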
	devi->devi_node_attributes = 0;
	switch (nodeid) {
	case DEVI_SID_HIDDEN_NODEID:
		devi->devi_node_attributes |= DDI_HIDDEN_NODE;
		goto sid;

	case DEVI_SID_HP_NODEID:
		devi->devi_node_attributes |= DDI_HOTPLUG_NODE;
		goto sid;

	case DEVI_SID_HP_HIDDEN_NODEID:
		devi->devi_node_attributes |= DDI_HIDDEN_NODE;
		devi->devi_node_attributes |= DDI_HOTPLUG_NODE;
		goto sid;

	case DEVI_SID_NODEID:
sid:		devi->devi_node_attributes |= DDI_PERSISTENT;
		if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL)
			goto fail;
		/*FALLTHROUGH*/

	case DEVI_PSEUDO_NODEID:
		devi->devi_node_attributes |= DDI_AUTO_ASSIGNED_NODEID;
		devi->devi_node_class = DDI_NC_PSEUDO;
		if (impl_ddi_alloc_nodeid(&devi->devi_nodeid)) {
			panic("i_ddi_alloc_node: out of nodeids");
			/*NOTREACHED*/
		}
		break;

	default:
		if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL)
			goto fail;
		/*
		 * the nodetype is 'prom', try to 'take' the nodeid now.
		 * This requires memory allocation, so check for failure.
		 */
		if (impl_ddi_take_nodeid(nodeid, flag) != 0) {
			kmem_free(elem, sizeof (*elem));
			goto fail;
		}

		devi->devi_nodeid = nodeid;
		devi->devi_node_class = DDI_NC_PROM;
		devi->devi_node_attributes = DDI_PERSISTENT;
		break;
	}
	if (ndi_dev_is_persistent_node((dev_info_t *)devi)) {
		mutex_enter(&devimap->dno_lock);
		elem->next = devimap->dno_free;
		devimap->dno_free = elem;
		mutex_exit(&devimap->dno_lock);
	}

	/*
	 * Instance is normally initialized to -1. In a few special
	 * cases, the caller may specify an instance (e.g. CPU nodes).
	 */
	devi->devi_instance = instance;
	/*
	 * set parent and bus_ctl parent
	 */
	devi->devi_parent = DEVI(pdip);
	devi->devi_bus_ctl = DEVI(pdip);

	NDI_CONFIG_DEBUG((CE_CONT,
	    "i_ddi_alloc_node: name=%s id=%d\n", node_name, devi->devi_nodeid));

	cv_init(&(devi->devi_cv), NULL, CV_DEFAULT, NULL);
	mutex_init(&(devi->devi_lock), NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&(devi->devi_pm_lock), NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&(devi->devi_pm_busy_lock), NULL, MUTEX_DEFAULT, NULL);

	RIO_TRACE((CE_NOTE, "i_ddi_alloc_node: Initing contract fields: "
	    "dip=%p, name=%s", (void *)devi, node_name));

	mutex_init(&(devi->devi_ct_lock), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&(devi->devi_ct_cv), NULL, CV_DEFAULT, NULL);
	devi->devi_ct_count = -1;	/* counter not in use if -1 */
	list_create(&(devi->devi_ct), sizeof (cont_device_t),
	    offsetof(cont_device_t, cond_next));

	i_ddi_set_node_state((dev_info_t *)devi, DS_PROTO);
	da_log_enter((dev_info_t *)devi);
	return ((dev_info_t *)devi);
fail:
	if (devi->devi_sys_prop_ptr)
		i_ddi_prop_list_delete(devi->devi_sys_prop_ptr);
	if (devi->devi_node_name)
		kmem_free(devi->devi_node_name, strlen(node_name) + 1);
	if (devi->devi_audit)
		kmem_free(devi->devi_audit, sizeof (devinfo_audit_t));
	kmem_cache_free(ddi_node_cache, devi);
	cmn_err(CE_NOTE, failed);
	return (NULL);
}
/*
 * free a dev_info structure.
 * NB. Not callable from interrupt since impl_ddi_free_nodeid may block.
 */
void
i_ddi_free_node(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct devi_nodeid *elem;

	ASSERT(devi->devi_ref == 0);
	ASSERT(devi->devi_addr == NULL);
	ASSERT(devi->devi_node_state == DS_PROTO);
	ASSERT(devi->devi_child == NULL);
	ASSERT(devi->devi_hp_hdlp == NULL);
	/* free devi_addr_buf allocated by ddi_set_name_addr() */
	if (devi->devi_addr_buf)
		kmem_free(devi->devi_addr_buf, 2 * MAXNAMELEN);

	if (i_ndi_dev_is_auto_assigned_node(dip))
		impl_ddi_free_nodeid(DEVI(dip)->devi_nodeid);

	if (ndi_dev_is_persistent_node(dip)) {
		mutex_enter(&devimap->dno_lock);
		ASSERT(devimap->dno_free);
		elem = devimap->dno_free;
		devimap->dno_free = elem->next;
		mutex_exit(&devimap->dno_lock);
		kmem_free(elem, sizeof (*elem));
	}
	if (DEVI(dip)->devi_compat_names)
		kmem_free(DEVI(dip)->devi_compat_names,
		    DEVI(dip)->devi_compat_length);
	if (DEVI(dip)->devi_rebinding_name)
		kmem_free(DEVI(dip)->devi_rebinding_name,
		    strlen(DEVI(dip)->devi_rebinding_name) + 1);

	ddi_prop_remove_all(dip);	/* remove driver properties */
	if (devi->devi_sys_prop_ptr)
		i_ddi_prop_list_delete(devi->devi_sys_prop_ptr);
	if (devi->devi_hw_prop_ptr)
		i_ddi_prop_list_delete(devi->devi_hw_prop_ptr);

	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	i_ddi_set_node_state(dip, DS_INVAL);

	if (devi->devi_audit) {
		kmem_free(devi->devi_audit, sizeof (devinfo_audit_t));
	}
	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);
	cv_destroy(&(devi->devi_cv));
	mutex_destroy(&(devi->devi_lock));
	mutex_destroy(&(devi->devi_pm_lock));
	mutex_destroy(&(devi->devi_pm_busy_lock));

	RIO_TRACE((CE_NOTE, "i_ddi_free_node: destroying contract fields: "
	    "dip=%p", (void *)dip));
	contract_device_remove_dip(dip);
	ASSERT(devi->devi_ct_count == -1);
	ASSERT(list_is_empty(&(devi->devi_ct)));
	cv_destroy(&(devi->devi_ct_cv));
	list_destroy(&(devi->devi_ct));
	/* free this last since contract_device_remove_dip() uses it */
	mutex_destroy(&(devi->devi_ct_lock));
	RIO_TRACE((CE_NOTE, "i_ddi_free_node: destroyed all contract fields: "
	    "dip=%p, name=%s", (void *)dip, devi->devi_node_name));

	kmem_free(devi->devi_node_name, strlen(devi->devi_node_name) + 1);

	/* free event data */
	if (devi->devi_ev_path)
		kmem_free(devi->devi_ev_path, MAXPATHLEN);

	kmem_cache_free(ddi_node_cache, devi);
}
/*
 * Node state transitions
 */
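/*
 * A node normally moves up through
 *	DS_PROTO -> DS_LINKED -> DS_BOUND -> DS_INITIALIZED -> DS_PROBED ->
 *	DS_ATTACHED -> DS_READY
 * one state at a time via i_ndi_config_node(), and back down again via
 * i_ndi_unconfig_node(); the helper routines below implement the
 * individual steps.
 */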
/*
 * Change the node name
 */
int
ndi_devi_set_nodename(dev_info_t *dip, char *name, int flags)
{
	_NOTE(ARGUNUSED(flags))
	char *nname, *oname;

	oname = DEVI(dip)->devi_node_name;
	if (strcmp(oname, name) == 0)
		return (DDI_SUCCESS);

	/*
	 * pcicfg_fix_ethernet requires a name change after node
	 * is linked into the tree. When pcicfg is fixed, we
	 * should only allow name change in DS_PROTO state.
	 */
	if (i_ddi_node_state(dip) >= DS_BOUND) {
		/*
		 * Don't allow name change once node is bound
		 */
		cmn_err(CE_NOTE,
		    "ndi_devi_set_nodename: node already bound dip = %p,"
		    " %s -> %s", (void *)dip, ddi_node_name(dip), name);
		return (NDI_FAILURE);
	}

	nname = i_ddi_strdup(name, KM_SLEEP);
	DEVI(dip)->devi_node_name = nname;
	i_ddi_set_binding_name(dip, nname);
	kmem_free(oname, strlen(oname) + 1);

	return (NDI_SUCCESS);
}
void
i_ddi_add_devimap(dev_info_t *dip)
{
	struct devi_nodeid *elem;

	if (!ndi_dev_is_persistent_node(dip))
		return;

	ASSERT(ddi_get_parent(dip) == NULL || (DEVI_VHCI_NODE(dip)) ||
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	mutex_enter(&devimap->dno_lock);

	ASSERT(devimap->dno_free);

	elem = devimap->dno_free;
	devimap->dno_free = elem->next;

	elem->nodeid = ddi_get_nodeid(dip);
	elem->dip = dip;
	elem->next = devimap->dno_head;
	devimap->dno_head = elem;

	devimap->dno_list_length++;

	mutex_exit(&devimap->dno_lock);
}
static int
i_ddi_remove_devimap(dev_info_t *dip)
{
	struct devi_nodeid *prev, *elem;
	static const char *fcn = "i_ddi_remove_devimap";

	if (!ndi_dev_is_persistent_node(dip))
		return (DDI_SUCCESS);

	mutex_enter(&devimap->dno_lock);

	/*
	 * The following check is done with dno_lock held
	 * to prevent race between dip removal and
	 * e_ddi_prom_node_to_dip()
	 */
	if (e_ddi_devi_holdcnt(dip)) {
		mutex_exit(&devimap->dno_lock);
		return (DDI_FAILURE);
	}

	ASSERT(devimap->dno_head);
	ASSERT(devimap->dno_list_length > 0);

	prev = NULL;
	for (elem = devimap->dno_head; elem; elem = elem->next) {
		if (elem->dip == dip) {
			ASSERT(elem->nodeid == ddi_get_nodeid(dip));
			break;
		}
		prev = elem;
	}

	if (elem && prev)
		prev->next = elem->next;
	else if (elem)
		devimap->dno_head = elem->next;
	else
		panic("%s: devinfo node(%p) not found",
		    fcn, (void *)dip);

	devimap->dno_list_length--;

	elem->next = devimap->dno_free;
	devimap->dno_free = elem;

	mutex_exit(&devimap->dno_lock);

	return (DDI_SUCCESS);
}
/*
 * Link this node into the devinfo tree and add to orphan list
 * Not callable from interrupt context
 */
static void
link_node(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct dev_info *parent = devi->devi_parent;
	dev_info_t **dipp;

	ASSERT(parent);	/* never called for root node */

	NDI_CONFIG_DEBUG((CE_CONT, "link_node: parent = %s child = %s\n",
	    parent->devi_node_name, devi->devi_node_name));

	/*
	 * Hold the global_vhci_lock before linking any direct
	 * children of rootnex driver. This special lock protects
	 * linking and unlinking for rootnex direct children.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_enter(&global_vhci_lock);

	/*
	 * attach the node to end of the list unless the node is already there
	 */
	dipp = (dev_info_t **)(&DEVI(parent)->devi_child);
	while (*dipp && (*dipp != dip)) {
		dipp = (dev_info_t **)(&DEVI(*dipp)->devi_sibling);
	}
	ASSERT(*dipp == NULL);	/* node is not linked */

	/*
	 * Now that we are in the tree, update the devi-nodeid map.
	 */
	i_ddi_add_devimap(dip);

	/*
	 * This is a temporary workaround for Bug 4618861.
	 * We keep the scsi_vhci nexus node on the left side of the devinfo
	 * tree (under the root nexus driver), so that virtual nodes under
	 * scsi_vhci will be SUSPENDed first and RESUMEd last. This ensures
	 * that the pHCI nodes are active during times when their clients
	 * may be depending on them. This workaround embodies the knowledge
	 * that system PM and CPR both traverse the tree left-to-right during
	 * SUSPEND and right-to-left during RESUME.
	 * Extending the workaround to IB Nexus/VHCI
	 */
	if (strcmp(devi->devi_binding_name, "scsi_vhci") == 0) {
		/* Add scsi_vhci to beginning of list */
		ASSERT((dev_info_t *)parent == top_devinfo);
		/* scsi_vhci under rootnex */
		devi->devi_sibling = parent->devi_child;
		parent->devi_child = devi;
	} else if (strcmp(devi->devi_binding_name, "ib") == 0) {
		i_link_vhci_node(dip);
	} else {
		/* Add to end of list */
		*dipp = dip;
		DEVI(dip)->devi_sibling = NULL;
	}

	/*
	 * Release the global_vhci_lock before linking any direct
	 * children of rootnex driver.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_exit(&global_vhci_lock);

	/* persistent nodes go on orphan list */
	if (ndi_dev_is_persistent_node(dip))
		add_to_dn_list(&orphanlist, dip);
}
/*
 * Unlink this node from the devinfo tree
 */
static int
unlink_node(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct dev_info *parent = devi->devi_parent;
	dev_info_t **dipp;
	ddi_hp_cn_handle_t *hdlp;

	ASSERT(parent != NULL);
	ASSERT(devi->devi_node_state == DS_LINKED);

	NDI_CONFIG_DEBUG((CE_CONT, "unlink_node: name = %s\n",
	    ddi_node_name(dip)));

	/* check references */
	if (devi->devi_ref || i_ddi_remove_devimap(dip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Hold the global_vhci_lock before linking any direct
	 * children of rootnex driver.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_enter(&global_vhci_lock);

	dipp = (dev_info_t **)(&DEVI(parent)->devi_child);
	while (*dipp && (*dipp != dip)) {
		dipp = (dev_info_t **)(&DEVI(*dipp)->devi_sibling);
	}
	if (*dipp) {
		*dipp = (dev_info_t *)(devi->devi_sibling);
		devi->devi_sibling = NULL;
	} else {
		NDI_CONFIG_DEBUG((CE_NOTE, "unlink_node: %s not linked",
		    devi->devi_node_name));
	}

	/*
	 * Release the global_vhci_lock before linking any direct
	 * children of rootnex driver.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_exit(&global_vhci_lock);

	/* Remove node from orphan list */
	if (ndi_dev_is_persistent_node(dip)) {
		remove_from_dn_list(&orphanlist, dip);
	}

	/* Update parent's hotplug handle list */
	for (hdlp = DEVI(parent)->devi_hp_hdlp; hdlp; hdlp = hdlp->next) {
		if (hdlp->cn_info.cn_child == dip)
			hdlp->cn_info.cn_child = NULL;
	}
	return (DDI_SUCCESS);
}
/*
 * Bind this devinfo node to a driver. If compat is NON-NULL, try that first.
 * Else, use the node-name.
 *
 * NOTE: IEEE1275 specifies that nodename should be tried before compatible.
 * Solaris implementation binds nodename after compatible.
 *
 * If we find a binding,
 * - set the binding name to the string,
 * - set major number to driver major
 *
 * If we don't find a binding,
 * - return failure
 */
static int
bind_node(dev_info_t *dip)
{
	char *p = NULL;
	major_t major = DDI_MAJOR_T_NONE;
	struct dev_info *devi = DEVI(dip);
	dev_info_t *parent = ddi_get_parent(dip);

	ASSERT(devi->devi_node_state == DS_LINKED);

	NDI_CONFIG_DEBUG((CE_CONT, "bind_node: 0x%p(name = %s)\n",
	    (void *)dip, ddi_node_name(dip)));

	mutex_enter(&DEVI(dip)->devi_lock);
	if (DEVI(dip)->devi_flags & DEVI_NO_BIND) {
		mutex_exit(&DEVI(dip)->devi_lock);
		return (DDI_FAILURE);
	}
	mutex_exit(&DEVI(dip)->devi_lock);

	/* find the driver with most specific binding using compatible */
	major = ddi_compatible_driver_major(dip, &p);
	if (major == DDI_MAJOR_T_NONE)
		return (DDI_FAILURE);

	devi->devi_major = major;
	if (p != NULL) {
		i_ddi_set_binding_name(dip, p);
		NDI_CONFIG_DEBUG((CE_CONT, "bind_node: %s bound to %s\n",
		    devi->devi_node_name, p));
	}

	/* Link node to per-driver list */
	link_to_driver_list(dip);

	/*
	 * reset parent flag so that nexus will merge .conf props
	 */
	if (ndi_dev_is_persistent_node(dip)) {
		mutex_enter(&DEVI(parent)->devi_lock);
		DEVI(parent)->devi_flags &=
		    ~(DEVI_ATTACHED_CHILDREN|DEVI_MADE_CHILDREN);
		mutex_exit(&DEVI(parent)->devi_lock);
	}
	return (DDI_SUCCESS);
}
/*
 * Unbind this devinfo node
 * Called before the node is destroyed or driver is removed from system
 */
static int
unbind_node(dev_info_t *dip)
{
	ASSERT(DEVI(dip)->devi_node_state == DS_BOUND);
	ASSERT(DEVI(dip)->devi_major != DDI_MAJOR_T_NONE);

	/* check references */
	if (DEVI(dip)->devi_ref)
		return (DDI_FAILURE);

	NDI_CONFIG_DEBUG((CE_CONT, "unbind_node: 0x%p(name = %s)\n",
	    (void *)dip, ddi_node_name(dip)));

	unlink_from_driver_list(dip);

	DEVI(dip)->devi_major = DDI_MAJOR_T_NONE;
	DEVI(dip)->devi_binding_name = DEVI(dip)->devi_node_name;
	return (DDI_SUCCESS);
}
/*
 * Initialize a node: calls the parent nexus' bus_ctl ops to do the operation.
 * Must hold parent and per-driver list while calling this function.
 * A successful init_node() returns with an active ndi_hold_devi() hold on
 * the parent.
 */
static int
init_node(dev_info_t *dip)
{
	int error;
	dev_info_t *pdip = ddi_get_parent(dip);
	int (*f)(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *, void *);
	char *path;
	major_t major;
	ddi_devid_t devid = NULL;

	ASSERT(i_ddi_node_state(dip) == DS_BOUND);

	/* should be DS_READY except for pcmcia ... */
	ASSERT(i_ddi_node_state(pdip) >= DS_PROBED);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	NDI_CONFIG_DEBUG((CE_CONT, "init_node: entry: path %s 0x%p\n",
	    path, (void *)dip));

	/*
	 * The parent must have a bus_ctl operation.
	 */
	if ((DEVI(pdip)->devi_ops->devo_bus_ops == NULL) ||
	    (f = DEVI(pdip)->devi_ops->devo_bus_ops->bus_ctl) == NULL) {
		error = DDI_FAILURE;
		goto out;
	}

	add_global_props(dip);

	/*
	 * Invoke the parent's bus_ctl operation with the DDI_CTLOPS_INITCHILD
	 * command to transform the child to canonical form 1. If there
	 * is an error, ddi_remove_child should be called, to clean up.
	 */
	error = (*f)(pdip, pdip, DDI_CTLOPS_INITCHILD, dip, NULL);
	if (error != DDI_SUCCESS) {
		NDI_CONFIG_DEBUG((CE_CONT, "init_node: %s 0x%p failed\n",
		    path, (void *)dip));
		remove_global_props(dip);

		/*
		 * If a nexus INITCHILD implementation calls
		 * ddi_devid_register() prior to setting devi_addr, the devid
		 * is not recorded in the devid cache (i.e. DEVI_CACHED_DEVID
		 * is not set). With mpxio, while the vhci client path may be
		 * missing from the cache, phci pathinfo paths may have
		 * already been added to the cache, against the client dip,
		 * by use of e_devid_cache_pathinfo(). Because of this, when
		 * INITCHILD of the client fails, we need to purge the client
		 * dip from the cache even if DEVI_CACHED_DEVID is not set -
		 * if only devi_devid_str is set.
		 */
		mutex_enter(&DEVI(dip)->devi_lock);
		if ((DEVI(dip)->devi_flags & DEVI_CACHED_DEVID) ||
		    DEVI(dip)->devi_devid_str) {
			DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
			ddi_devid_unregister(dip);
		} else
			mutex_exit(&DEVI(dip)->devi_lock);

		/* in case nexus driver didn't clear this field */
		ddi_set_name_addr(dip, NULL);
		error = DDI_FAILURE;
		goto out;
	}

	ndi_hold_devi(pdip);			/* initial hold of parent */

	/* recompute path after initchild for @addr information */
	(void) ddi_pathname(dip, path);

	/* Check for duplicate nodes */
	if (find_duplicate_child(pdip, dip) != NULL) {
		/*
		 * uninit_node() the duplicate - a successful uninit_node()
		 * will release initial hold of parent using ndi_rele_devi().
		 */
		if ((error = uninit_node(dip)) != DDI_SUCCESS) {
			ndi_rele_devi(pdip);	/* release initial hold */
			cmn_err(CE_WARN, "init_node: uninit of duplicate "
			    "node %s failed", path);
		}
		NDI_CONFIG_DEBUG((CE_CONT, "init_node: duplicate uninit "
		    "%s 0x%p%s\n", path, (void *)dip,
		    (error == DDI_SUCCESS) ? "" : " failed"));
		error = DDI_FAILURE;
		goto out;
	}

	/*
	 * If a devid was registered for a DS_BOUND node then the devid_cache
	 * may not have captured the path. Detect this situation and ensure
	 * that the path enters the cache now that devi_addr is established.
	 */
	if (!(DEVI(dip)->devi_flags & DEVI_CACHED_DEVID) &&
	    (ddi_devid_get(dip, &devid) == DDI_SUCCESS)) {
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		}
		ddi_devid_free(devid);
	}

	/*
	 * Check to see if we have a path-oriented driver alias that overrides
	 * the current driver binding. If so, we need to rebind. This check
	 * needs to be delayed until after a successful DDI_CTLOPS_INITCHILD,
	 * so the unit-address is established on the last component of the
	 * path.
	 *
	 * NOTE: Allowing a path-oriented alias to change the driver binding
	 * of a driver.conf node results in non-intuitive property behavior.
	 * We provide a tunable (driver_conf_allow_path_alias) to control
	 * this behavior. See uninit_node() for more details.
	 *
	 * NOTE: If you are adding a path-oriented alias for the boot device,
	 * and there is mismatch between OBP and the kernel in regard to
	 * generic name use, like "disk" .vs. "ssd", then you will need
	 * to add a path-oriented alias for both paths.
	 */
	major = ddi_name_to_major(path);
	if (driver_active(major) && (major != DEVI(dip)->devi_major) &&
	    (ndi_dev_is_persistent_node(dip) || driver_conf_allow_path_alias)) {

		/* Mark node for rebind processing. */
		mutex_enter(&DEVI(dip)->devi_lock);
		DEVI(dip)->devi_flags |= DEVI_REBIND;
		mutex_exit(&DEVI(dip)->devi_lock);

		/*
		 * Add an extra hold on the parent to prevent it from ever
		 * having a zero devi_ref during the child rebind process.
		 * This is necessary to ensure that the parent will never
		 * detach(9E) during the rebind.
		 */
		ndi_hold_devi(pdip);		/* extra hold of parent */

		/*
		 * uninit_node() current binding - a successful uninit_node()
		 * will release extra hold of parent using ndi_rele_devi().
		 */
		if ((error = uninit_node(dip)) != DDI_SUCCESS) {
			ndi_rele_devi(pdip);	/* release extra hold */
			ndi_rele_devi(pdip);	/* release initial hold */
			cmn_err(CE_WARN, "init_node: uninit for rebind "
			    "of node %s failed", path);
			goto out;
		}

		/* Unbind: demote the node back to DS_LINKED. */
		if ((error = ndi_devi_unbind_driver(dip)) != DDI_SUCCESS) {
			ndi_rele_devi(pdip);	/* release initial hold */
			cmn_err(CE_WARN, "init_node: unbind for rebind "
			    "of node %s failed", path);
			goto out;
		}

		/* establish rebinding name */
		if (DEVI(dip)->devi_rebinding_name == NULL)
			DEVI(dip)->devi_rebinding_name =
			    i_ddi_strdup(path, KM_SLEEP);

		/*
		 * Now that we are demoted and marked for rebind, repromote.
		 * We need to do this in steps, instead of just calling
		 * ddi_initchild, so that we can redo the merge operation
		 * after we are rebound to the path-bound driver.
		 *
		 * Start by rebinding node to the path-bound driver.
		 */
		if ((error = ndi_devi_bind_driver(dip, 0)) != DDI_SUCCESS) {
			ndi_rele_devi(pdip);	/* release initial hold */
			cmn_err(CE_WARN, "init_node: rebind "
			    "of node %s failed", path);
			goto out;
		}

		/*
		 * If the node is not a driver.conf node then merge
		 * driver.conf properties from new path-bound driver.conf.
		 */
		if (ndi_dev_is_persistent_node(dip))
			(void) i_ndi_make_spec_children(pdip, 0);

		/*
		 * Now that we have taken care of merge, repromote back
		 * to DS_INITIALIZED.
		 */
		error = ddi_initchild(pdip, dip);
		NDI_CONFIG_DEBUG((CE_CONT, "init_node: rebind "
		    "%s 0x%p\n", path, (void *)dip));

		/*
		 * Release our initial hold. If ddi_initchild() was
		 * successful then it will return with the active hold.
		 */
		ndi_rele_devi(pdip);
		goto out;
	}

	/*
	 * Apply multi-parent/deep-nexus optimization to the new node
	 */
	DEVI(dip)->devi_instance = e_ddi_assign_instance(dip);
	ddi_optimize_dtree(dip);
	error = DDI_SUCCESS;		/* return with active hold */

out:	if (error != DDI_SUCCESS) {
		/* On failure ensure that DEVI_REBIND is cleared */
		mutex_enter(&DEVI(dip)->devi_lock);
		DEVI(dip)->devi_flags &= ~DEVI_REBIND;
		mutex_exit(&DEVI(dip)->devi_lock);
	}
	kmem_free(path, MAXPATHLEN);
	return (error);
}
/*
 * Uninitialize node
 * The per-driver list must be held busy during the call.
 * A successful uninit_node() releases the init_node() hold on
 * the parent by calling ndi_rele_devi().
 */
static int
uninit_node(dev_info_t *dip)
{
	int node_state_entry;
	dev_info_t *pdip;
	struct dev_ops *ops;
	int (*f)();
	int error;
	char *addr;

	/*
	 * Don't check for references here or else a ref-counted
	 * dip cannot be downgraded by the framework.
	 */
	node_state_entry = i_ddi_node_state(dip);
	ASSERT((node_state_entry == DS_BOUND) ||
	    (node_state_entry == DS_INITIALIZED));
	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	NDI_CONFIG_DEBUG((CE_CONT, "uninit_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	if (((ops = ddi_get_driver(pdip)) == NULL) ||
	    (ops->devo_bus_ops == NULL) ||
	    ((f = ops->devo_bus_ops->bus_ctl) == NULL)) {
		return (DDI_FAILURE);
	}

	/*
	 * save the @addr prior to DDI_CTLOPS_UNINITCHILD for use in
	 * freeing the instance if it succeeds.
	 */
	if (node_state_entry == DS_INITIALIZED) {
		addr = ddi_get_name_addr(dip);
		if (addr)
			addr = i_ddi_strdup(addr, KM_SLEEP);
	} else {
		addr = NULL;
	}

	error = (*f)(pdip, pdip, DDI_CTLOPS_UNINITCHILD, dip, NULL);
	if (error == DDI_SUCCESS) {
		/* ensure that devids are unregistered */
		mutex_enter(&DEVI(dip)->devi_lock);
		if ((DEVI(dip)->devi_flags & DEVI_CACHED_DEVID)) {
			DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
			ddi_devid_unregister(dip);
		} else
			mutex_exit(&DEVI(dip)->devi_lock);

		/* if uninitchild forgot to set devi_addr to NULL do it now */
		ddi_set_name_addr(dip, NULL);

		/*
		 * Free instance number. This is a no-op if instance has
		 * been kept by probe_node(). Avoid free when we are called
		 * from init_node (DS_BOUND) because the instance has not yet
		 * been assigned.
		 */
		if (node_state_entry == DS_INITIALIZED) {
			e_ddi_free_instance(dip, addr);
			DEVI(dip)->devi_instance = -1;
		}

		/* release the init_node hold */
		ndi_rele_devi(pdip);

		remove_global_props(dip);

		/*
		 * NOTE: The decision on whether to allow a path-oriented
		 * rebind of a driver.conf enumerated node is made by
		 * init_node() based on driver_conf_allow_path_alias. The
		 * rebind code below prevents deletion of system properties
		 * on driver.conf nodes.
		 *
		 * When driver_conf_allow_path_alias is set, property behavior
		 * on rebound driver.conf file is non-intuitive. For a
		 * driver.conf node, the unit-address properties come from
		 * the driver.conf file as system properties. Removing system
		 * properties from a driver.conf node makes the node
		 * useless (we get node without unit-address properties) - so
		 * we leave system properties in place. The result is a node
		 * where system properties come from the node being rebound,
		 * and global properties come from the driver.conf file
		 * of the driver we are rebinding to. If we could determine
		 * that the path-oriented alias driver.conf file defined a
		 * node at the same unit address, it would be best to use
		 * that node and avoid the non-intuitive property behavior.
		 * Unfortunately, the current "merge" code does not support
		 * this, so we live with the non-intuitive property behavior.
		 */
		if (!((ndi_dev_is_persistent_node(dip) == 0) &&
		    (DEVI(dip)->devi_flags & DEVI_REBIND)))
			e_ddi_prop_remove_all(dip);
	} else {
		NDI_CONFIG_DEBUG((CE_CONT, "uninit_node failed: 0x%p(%s%d)\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
	}

	if (addr)
		kmem_free(addr, strlen(addr) + 1);
	return (error);
}
/*
 * Invoke driver's probe entry point to probe for existence of hardware.
 * Keep instance permanent for successful probe and leaf nodes.
 *
 * Per-driver list must be held busy while calling this function.
 */
static int
probe_node(dev_info_t *dip)
{
	int rv;

	ASSERT(i_ddi_node_state(dip) == DS_INITIALIZED);

	NDI_CONFIG_DEBUG((CE_CONT, "probe_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	/* temporarily hold the driver while we probe */
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL) {
		NDI_CONFIG_DEBUG((CE_CONT,
		    "probe_node: 0x%p(%s%d) cannot load driver\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
		return (DDI_FAILURE);
	}

	if (identify_9e != 0)
		(void) devi_identify(dip);

	rv = devi_probe(dip);

	/* release the driver now that probe is complete */
	ndi_rele_driver(dip);
	DEVI(dip)->devi_ops = NULL;

	switch (rv) {
	case DDI_PROBE_SUCCESS:			/* found */
	case DDI_PROBE_DONTCARE:		/* ddi_dev_is_sid */
		e_ddi_keep_instance(dip);	/* persist instance */
		rv = DDI_SUCCESS;
		break;

	case DDI_PROBE_PARTIAL:			/* maybe later */
	case DDI_PROBE_FAILURE:			/* not found */
		NDI_CONFIG_DEBUG((CE_CONT,
		    "probe_node: 0x%p(%s%d) no hardware found%s\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip),
		    (rv == DDI_PROBE_PARTIAL) ? " yet" : ""));
		break;

	default:
		cmn_err(CE_WARN, "probe_node: %s%d: illegal probe(9E) value",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		break;
	}

	return (rv);
}
/*
 * Unprobe a node. Simply reset the node state.
 * Per-driver list must be held busy while calling this function.
 */
static int
unprobe_node(dev_info_t *dip)
{
	ASSERT(i_ddi_node_state(dip) == DS_PROBED);

	/*
	 * Don't check for references here or else a ref-counted
	 * dip cannot be downgraded by the framework.
	 */

	NDI_CONFIG_DEBUG((CE_CONT, "unprobe_node: 0x%p(name = %s)\n",
	    (void *)dip, ddi_node_name(dip)));
	return (DDI_SUCCESS);
}
/*
 * Attach devinfo node.
 * Per-driver list must be held busy.
 */
static int
attach_node(dev_info_t *dip)
{
	int rv;

	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));
	ASSERT(i_ddi_node_state(dip) == DS_PROBED);

	NDI_CONFIG_DEBUG((CE_CONT, "attach_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	/*
	 * Tell mpxio framework that a node is about to online.
	 */
	if ((rv = mdi_devi_online(dip, 0)) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* no recursive attachment */
	ASSERT(DEVI(dip)->devi_ops == NULL);

	/*
	 * Hold driver the node is bound to.
	 */
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL) {
		/*
		 * We were able to load driver for probing, so we should
		 * not get here unless something really bad happened.
		 */
		cmn_err(CE_WARN, "attach_node: no driver for major %d",
		    DEVI(dip)->devi_major);
		return (DDI_FAILURE);
	}

	if (NEXUS_DRV(DEVI(dip)->devi_ops))
		DEVI(dip)->devi_taskq = ddi_taskq_create(dip,
		    TASKQ_DEFAULTPRI, 0);

	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_SET_ATTACHING(dip);
	DEVI_SET_NEED_RESET(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	rv = devi_attach(dip, DDI_ATTACH);

	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_CLR_ATTACHING(dip);

	if (rv != DDI_SUCCESS) {
		DEVI_CLR_NEED_RESET(dip);
		mutex_exit(&DEVI(dip)->devi_lock);

		/*
		 * Cleanup dacf reservations
		 */
		mutex_enter(&dacf_lock);
		dacf_clr_rsrvs(dip, DACF_OPID_POSTATTACH);
		dacf_clr_rsrvs(dip, DACF_OPID_PREDETACH);
		mutex_exit(&dacf_lock);
		if (DEVI(dip)->devi_taskq)
			ddi_taskq_destroy(DEVI(dip)->devi_taskq);
		ddi_remove_minor_node(dip, NULL);

		/* release the driver if attach failed */
		ndi_rele_driver(dip);
		DEVI(dip)->devi_ops = NULL;
		NDI_CONFIG_DEBUG((CE_CONT, "attach_node: 0x%p(%s%d) failed\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
		return (DDI_FAILURE);
	}
	mutex_exit(&DEVI(dip)->devi_lock);

	/* successful attach, return with driver held */

	return (DDI_SUCCESS);
}
/*
 * Detach devinfo node.
 * Per-driver list must be held busy.
 */
static int
detach_node(dev_info_t *dip, uint_t flag)
{
	struct devnames *dnp;
	int rv;

	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));
	ASSERT(i_ddi_node_state(dip) == DS_ATTACHED);

	/* check references */
	if (DEVI(dip)->devi_ref)
		return (DDI_FAILURE);

	NDI_CONFIG_DEBUG((CE_CONT, "detach_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	/*
	 * NOTE: If we are processing a pHCI node then the calling code
	 * must detect this and ndi_devi_enter() in (vHCI, parent(pHCI))
	 * order unless pHCI and vHCI are siblings. Code paths leading
	 * here that must ensure this ordering include:
	 * unconfig_immediate_children(), devi_unconfig_one(),
	 * ndi_devi_unconfig_one(), ndi_devi_offline().
	 */
	ASSERT(!MDI_PHCI(dip) ||
	    (ddi_get_parent(mdi_devi_get_vdip(dip)) == ddi_get_parent(dip)) ||
	    DEVI_BUSY_OWNED(mdi_devi_get_vdip(dip)));

	/* Offline the device node with the mpxio framework. */
	if (mdi_devi_offline(dip, flag) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* drain the taskq */
	if (DEVI(dip)->devi_taskq)
		ddi_taskq_wait(DEVI(dip)->devi_taskq);

	rv = devi_detach(dip, DDI_DETACH);

	if (rv != DDI_SUCCESS) {
		NDI_CONFIG_DEBUG((CE_CONT,
		    "detach_node: 0x%p(%s%d) failed\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
		return (DDI_FAILURE);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_CLR_NEED_RESET(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

#if defined(__amd64) && !defined(__xpv)
	/*
	 * Close any iommulib mediated linkage to an IOMMU
	 */
	if (IOMMU_USED(dip))
		iommulib_nex_close(dip);
#endif

	/* destroy the taskq */
	if (DEVI(dip)->devi_taskq) {
		ddi_taskq_destroy(DEVI(dip)->devi_taskq);
		DEVI(dip)->devi_taskq = NULL;
	}

	/* Cleanup dacf reservations */
	mutex_enter(&dacf_lock);
	dacf_clr_rsrvs(dip, DACF_OPID_POSTATTACH);
	dacf_clr_rsrvs(dip, DACF_OPID_PREDETACH);
	mutex_exit(&dacf_lock);

	/* remove any additional flavors that were added */
	if (DEVI(dip)->devi_flavorv_n > 1 && DEVI(dip)->devi_flavorv != NULL) {
		kmem_free(DEVI(dip)->devi_flavorv,
		    (DEVI(dip)->devi_flavorv_n - 1) * sizeof (void *));
		DEVI(dip)->devi_flavorv = NULL;
	}

	/* Remove properties and minor nodes in case driver forgot */
	ddi_remove_minor_node(dip, NULL);
	ddi_prop_remove_all(dip);

	/* a detached node can't have attached or .conf children */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~(DEVI_MADE_CHILDREN|DEVI_ATTACHED_CHILDREN);
	mutex_exit(&DEVI(dip)->devi_lock);

	/*
	 * If the instance has successfully detached in detach_driver()
	 * context, clear DN_DRIVER_HELD for correct
	 * ddi_hold_installed_driver() behavior. Consumers like qassociate()
	 * depend on this (via clnopen()).
	 */
	if (flag & NDI_DETACH_DRIVER) {
		dnp = &(devnamesp[DEVI(dip)->devi_major]);
		LOCK_DEV_OPS(&dnp->dn_lock);
		dnp->dn_flags &= ~DN_DRIVER_HELD;
		UNLOCK_DEV_OPS(&dnp->dn_lock);
	}

	/* successful detach, release the driver */
	ndi_rele_driver(dip);
	DEVI(dip)->devi_ops = NULL;
	return (DDI_SUCCESS);
}
/*
 * Run dacf post_attach routines
 */
static int
postattach_node(dev_info_t *dip)
{
	int rval;

	/*
	 * For hotplug busses like USB, it's possible that devices
	 * are removed but dip is still around. We don't want to
	 * run dacf routines as part of detach failure recovery.
	 *
	 * Pretend success until we figure out how to prevent
	 * access to such devinfo nodes.
	 */
	if (DEVI_IS_DEVICE_REMOVED(dip))
		return (DDI_SUCCESS);

	/*
	 * if dacf_postattach failed, report it to the framework
	 * so that it can be retried later at the open time.
	 */
	mutex_enter(&dacf_lock);
	rval = dacfc_postattach(dip);
	mutex_exit(&dacf_lock);

	/*
	 * Plumbing during postattach may fail because the underlying
	 * device is not ready. This will fail ndi_devi_config()
	 * in dv_filldir() and a warning message is issued. The message
	 * from here will explain what happened
	 */
	if (rval != DACF_SUCCESS) {
		cmn_err(CE_WARN, "Postattach failed for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * Run dacf pre-detach routines
 */
static int
predetach_node(dev_info_t *dip, uint_t flag)
{
	int ret;

	/*
	 * Don't auto-detach if DDI_FORCEATTACH or DDI_NO_AUTODETACH
	 * properties are set.
	 */
	if (flag & NDI_AUTODETACH) {
		struct devnames *dnp;
		int pflag = DDI_PROP_NOTPROM | DDI_PROP_DONTPASS;

		if ((ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    pflag, DDI_FORCEATTACH, 0) == 1) ||
		    (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    pflag, DDI_NO_AUTODETACH, 0) == 1))
			return (DDI_FAILURE);

		/* check for driver global version of DDI_NO_AUTODETACH */
		dnp = &devnamesp[DEVI(dip)->devi_major];
		LOCK_DEV_OPS(&dnp->dn_lock);
		if (dnp->dn_flags & DN_NO_AUTODETACH) {
			UNLOCK_DEV_OPS(&dnp->dn_lock);
			return (DDI_FAILURE);
		}
		UNLOCK_DEV_OPS(&dnp->dn_lock);
	}

	mutex_enter(&dacf_lock);
	ret = dacfc_predetach(dip);
	mutex_exit(&dacf_lock);

	return (ret);
}
/*
 * Wrapper for making multiple state transitions
 */

/*
 * i_ndi_config_node: upgrade dev_info node into a specified state.
 * It is a bit tricky because the locking protocol changes before and
 * after a node is bound to a driver. All locks are held external to
 * this function.
 */
int
i_ndi_config_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
{
	_NOTE(ARGUNUSED(flag))
	int rv = DDI_SUCCESS;

	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	while ((i_ddi_node_state(dip) < state) && (rv == DDI_SUCCESS)) {

		/* don't allow any more changes to the device tree */
		if (devinfo_freeze) {
			rv = DDI_FAILURE;
			break;
		}

		switch (i_ddi_node_state(dip)) {
		case DS_PROTO:
			/*
			 * only caller can reference this node, no external
			 * locking needed
			 */
			link_node(dip);
			translate_devid((dev_info_t *)dip);
			i_ddi_set_node_state(dip, DS_LINKED);
			break;
		case DS_LINKED:
			/*
			 * Three code paths may attempt to bind a node:
			 * boot code, add_drv, and hotplug.
			 *
			 * Boot code is single threaded, add_drv synchronizes
			 * on a userland lock, and hotplug synchronizes on
			 * hotplug_lk. There could be a race between add_drv
			 * and hotplug thread. We'll live with this until the
			 * conversion to top-down loading.
			 */
			if ((rv = bind_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_BOUND);
			break;
		case DS_BOUND:
			/*
			 * The following transitions synchronize on the
			 * per-driver busy changing flag, since we already
			 * have a driver.
			 */
			if ((rv = init_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_INITIALIZED);
			break;
		case DS_INITIALIZED:
			if ((rv = probe_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_PROBED);
			break;
		case DS_PROBED:
			/*
			 * If node is retired and persistent, then prevent
			 * attach. We can't do this for non-persistent nodes
			 * as we would lose evidence that the node existed.
			 */
			if (i_ddi_check_retire(dip) == 1 &&
			    ndi_dev_is_persistent_node(dip) &&
			    retire_prevents_attach == 1) {
				rv = DDI_FAILURE;
				break;
			}
			atomic_inc_ulong(&devinfo_attach_detach);
			if ((rv = attach_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_ATTACHED);
			atomic_dec_ulong(&devinfo_attach_detach);
			break;
		case DS_ATTACHED:
			if ((rv = postattach_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_READY);
			break;
		default:
			/* should never reach here */
			ASSERT("unknown devinfo state");
		}
	}

	if (ddidebug & DDI_AUDIT)
		da_log_enter(dip);
	return (rv);
}
/*
 * i_ndi_unconfig_node: downgrade dev_info node into a specified state.
 */
int
i_ndi_unconfig_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
{
	int rv = DDI_SUCCESS;

	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	while ((i_ddi_node_state(dip) > state) && (rv == DDI_SUCCESS)) {

		/* don't allow any more changes to the device tree */
		if (devinfo_freeze) {
			rv = DDI_FAILURE;
			break;
		}

		switch (i_ddi_node_state(dip)) {
		case DS_LINKED:
			/*
			 * Persistent nodes are only removed by hotplug code
			 * .conf nodes synchronize on per-driver list.
			 */
			if ((rv = unlink_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_PROTO);
			break;
		case DS_BOUND:
			/*
			 * The following transitions synchronize on the
			 * per-driver busy changing flag, since we already
			 * have a driver.
			 */
			if ((rv = unbind_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_LINKED);
			break;
		case DS_INITIALIZED:
			if ((rv = uninit_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_BOUND);
			break;
		case DS_PROBED:
			if ((rv = unprobe_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_INITIALIZED);
			break;
		case DS_ATTACHED:
			atomic_inc_ulong(&devinfo_attach_detach);

			mutex_enter(&(DEVI(dip)->devi_lock));
			DEVI_SET_DETACHING(dip);
			mutex_exit(&(DEVI(dip)->devi_lock));

			membar_enter();	/* ensure visibility for hold_devi */

			if ((rv = detach_node(dip, flag)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_PROBED);

			mutex_enter(&(DEVI(dip)->devi_lock));
			DEVI_CLR_DETACHING(dip);
			mutex_exit(&(DEVI(dip)->devi_lock));

			atomic_dec_ulong(&devinfo_attach_detach);
			break;
		case DS_READY:
			if ((rv = predetach_node(dip, flag)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_ATTACHED);
			break;
		default:
			ASSERT("unknown devinfo state");
		}
	}

	return (rv);
}
/*
 * ddi_initchild: transform node to DS_INITIALIZED state
 */
int
ddi_initchild(dev_info_t *parent, dev_info_t *proto)
{
	int ret, circ;

	ndi_devi_enter(parent, &circ);
	ret = i_ndi_config_node(proto, DS_INITIALIZED, 0);
	ndi_devi_exit(parent, circ);

	return (ret);
}
/*
 * ddi_uninitchild: transform node down to DS_BOUND state
 */
int
ddi_uninitchild(dev_info_t *dip)
{
	int ret, circ;
	dev_info_t *parent = ddi_get_parent(dip);
	ASSERT(parent);

	ndi_devi_enter(parent, &circ);
	ret = i_ndi_unconfig_node(dip, DS_BOUND, 0);
	ndi_devi_exit(parent, circ);

	return (ret);
}
/*
 * i_ddi_attachchild: transform node to DS_READY/i_ddi_devi_attached() state
 */
static int
i_ddi_attachchild(dev_info_t *dip)
{
	dev_info_t *parent = ddi_get_parent(dip);
	int ret;

	ASSERT(parent && DEVI_BUSY_OWNED(parent));

	if ((i_ddi_node_state(dip) < DS_BOUND) || DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_FAILURE);

	ret = i_ndi_config_node(dip, DS_READY, 0);
	if (ret == NDI_SUCCESS) {
		ret = DDI_SUCCESS;
	} else {
		/*
		 * Take it down to DS_INITIALIZED so pm_pre_probe is run
		 * on the next attach
		 */
		(void) i_ndi_unconfig_node(dip, DS_INITIALIZED, 0);
		ret = DDI_FAILURE;
	}

	return (ret);
}
/*
 * i_ddi_detachchild: transform node down to DS_PROBED state
 * If it fails, put it back to DS_READY state.
 * NOTE: A node that fails detach may be at DS_ATTACHED instead
 * of DS_READY for a small amount of time - this is the source of
 * transient DS_READY->DS_ATTACHED->DS_READY state changes.
 */
static int
i_ddi_detachchild(dev_info_t *dip, uint_t flags)
{
	dev_info_t *parent = ddi_get_parent(dip);
	int ret;

	ASSERT(parent && DEVI_BUSY_OWNED(parent));

	ret = i_ndi_unconfig_node(dip, DS_PROBED, flags);
	if (ret != DDI_SUCCESS)
		(void) i_ndi_config_node(dip, DS_READY, 0);
	else
		/* allow pm_pre_probe to reestablish pm state */
		(void) i_ndi_unconfig_node(dip, DS_INITIALIZED, 0);

	return (ret);
}
/*
 * Add a child and bind to driver
 */
dev_info_t *
ddi_add_child(dev_info_t *pdip, char *name, uint_t nodeid, uint_t unit)
{
	int circ;
	dev_info_t *dip;

	/* allocate a new node */
	dip = i_ddi_alloc_node(pdip, name, nodeid, (int)unit, NULL, KM_SLEEP);

	ndi_devi_enter(pdip, &circ);
	(void) i_ndi_config_node(dip, DS_BOUND, 0);
	ndi_devi_exit(pdip, circ);
	return (dip);
}
/*
 * ddi_remove_child: remove the dip. The parent must be attached and held
 */
int
ddi_remove_child(dev_info_t *dip, int dummy)
{
	_NOTE(ARGUNUSED(dummy))
	int circ, ret;
	dev_info_t *parent = ddi_get_parent(dip);
	ASSERT(parent);

	ndi_devi_enter(parent, &circ);

	/*
	 * If we still have children, for example SID nodes marked
	 * as persistent but not attached, attempt to remove them.
	 */
	if (DEVI(dip)->devi_child) {
		ret = ndi_devi_unconfig(dip, NDI_DEVI_REMOVE);
		if (ret != NDI_SUCCESS) {
			ndi_devi_exit(parent, circ);
			return (DDI_FAILURE);
		}
		ASSERT(DEVI(dip)->devi_child == NULL);
	}

	ret = i_ndi_unconfig_node(dip, DS_PROTO, 0);
	ndi_devi_exit(parent, circ);

	if (ret != DDI_SUCCESS)
		return (ret);

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	i_ddi_free_node(dip);
	return (DDI_SUCCESS);
}
/*
 * NDI wrappers for ref counting, node allocation, and transitions
 */

/*
 * Hold/release the devinfo node itself.
 * Caller is assumed to prevent the devi from detaching during this call
 */
void
ndi_hold_devi(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	ASSERT(DEVI(dip)->devi_ref >= 0);
	DEVI(dip)->devi_ref++;
	membar_enter();			/* make sure stores are flushed */
	mutex_exit(&DEVI(dip)->devi_lock);
}
void
ndi_rele_devi(dev_info_t *dip)
{
	ASSERT(DEVI(dip)->devi_ref > 0);

	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_ref--;
	membar_enter();			/* make sure stores are flushed */
	mutex_exit(&DEVI(dip)->devi_lock);
}

int
e_ddi_devi_holdcnt(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ref);
}
/*
 * Hold/release the driver the devinfo node is bound to.
 */
struct dev_ops *
ndi_hold_driver(dev_info_t *dip)
{
	if (i_ddi_node_state(dip) < DS_BOUND)
		return (NULL);

	ASSERT(DEVI(dip)->devi_major != -1);
	return (mod_hold_dev_by_major(DEVI(dip)->devi_major));
}

void
ndi_rele_driver(dev_info_t *dip)
{
	ASSERT(i_ddi_node_state(dip) >= DS_BOUND);
	mod_rele_dev_by_major(DEVI(dip)->devi_major);
}
/*
 * Single thread entry into devinfo node for modifying its children (devinfo,
 * pathinfo, and minor). To verify in ASSERTS use DEVI_BUSY_OWNED macro.
 */
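/*
 * Typical usage (see ddi_initchild() above) brackets child-list changes:
 *
 *	int circ;
 *	ndi_devi_enter(parent, &circ);
 *	...modify children of parent...
 *	ndi_devi_exit(parent, circ);
 *
 * The circ value returned by ndi_devi_enter() lets the same thread re-enter
 * the node recursively.
 */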
void
ndi_devi_enter(dev_info_t *dip, int *circular)
{
	struct dev_info *devi = DEVI(dip);
	ASSERT(dip != NULL);

	/* for vHCI, enforce (vHCI, pHCI) ndi_devi_enter() order */
	ASSERT(!MDI_VHCI(dip) || (mdi_devi_pdip_entered(dip) == 0) ||
	    DEVI_BUSY_OWNED(dip));

	mutex_enter(&devi->devi_lock);
	if (devi->devi_busy_thread == curthread) {
		devi->devi_circular++;
	} else {
		while (DEVI_BUSY_CHANGING(devi) && !panicstr)
			cv_wait(&(devi->devi_cv), &(devi->devi_lock));
		if (panicstr) {
			mutex_exit(&devi->devi_lock);
			return;
		}
		devi->devi_flags |= DEVI_BUSY;
		devi->devi_busy_thread = curthread;
	}
	*circular = devi->devi_circular;
	mutex_exit(&devi->devi_lock);
}
/*
 * Release ndi_devi_enter or successful ndi_devi_tryenter.
 */
void
ndi_devi_exit(dev_info_t *dip, int circular)
{
	struct dev_info	*devi = DEVI(dip);
	struct dev_info	*vdevi;
	ASSERT(dip != NULL);

	if (panicstr)
		return;

	mutex_enter(&(devi->devi_lock));
	if (circular != 0) {
		devi->devi_circular--;
	} else {
		devi->devi_flags &= ~DEVI_BUSY;
		ASSERT(devi->devi_busy_thread == curthread);
		devi->devi_busy_thread = NULL;
		cv_broadcast(&(devi->devi_cv));
	}
	mutex_exit(&(devi->devi_lock));

	/*
	 * For pHCI exit we issue a broadcast to vHCI for ndi_devi_config_one()
	 * doing cv_wait on vHCI.
	 */
	if (MDI_PHCI(dip)) {
		vdevi = DEVI(mdi_devi_get_vdip(dip));
		if (vdevi) {
			mutex_enter(&(vdevi->devi_lock));
			if (vdevi->devi_flags & DEVI_PHCI_SIGNALS_VHCI) {
				vdevi->devi_flags &= ~DEVI_PHCI_SIGNALS_VHCI;
				cv_broadcast(&(vdevi->devi_cv));
			}
			mutex_exit(&(vdevi->devi_lock));
		}
	}
}
/*
 * Release ndi_devi_enter and wait for possibility of new children, avoiding
 * possibility of missing broadcast before getting to cv_timedwait().
 */
static void
ndi_devi_exit_and_wait(dev_info_t *dip, int circular, clock_t end_time)
{
	struct dev_info *devi = DEVI(dip);
	ASSERT(dip != NULL);

	/*
	 * We are called to wait for a new child, and a new child can
	 * only be added if circular is zero.
	 */
	ASSERT(circular == 0);

	/* like ndi_devi_exit with circular of zero */
	mutex_enter(&(devi->devi_lock));
	devi->devi_flags &= ~DEVI_BUSY;
	ASSERT(devi->devi_busy_thread == curthread);
	devi->devi_busy_thread = NULL;
	cv_broadcast(&(devi->devi_cv));

	/* now wait for new children while still holding devi_lock */
	(void) cv_timedwait(&devi->devi_cv, &(devi->devi_lock), end_time);
	mutex_exit(&(devi->devi_lock));
}
/*
 * Attempt to single thread entry into devinfo node for modifying its children.
 */
int
ndi_devi_tryenter(dev_info_t *dip, int *circular)
{
	int rval = 1;			/* assume we enter */
	struct dev_info *devi = DEVI(dip);
	ASSERT(dip != NULL);

	mutex_enter(&devi->devi_lock);
	if (devi->devi_busy_thread == (void *)curthread) {
		devi->devi_circular++;
	} else {
		if (!DEVI_BUSY_CHANGING(devi)) {
			devi->devi_flags |= DEVI_BUSY;
			devi->devi_busy_thread = (void *)curthread;
		} else {
			rval = 0;	/* devi is busy */
		}
	}
	*circular = devi->devi_circular;
	mutex_exit(&devi->devi_lock);
	return (rval);
}
/*
 * Allocate and initialize a new dev_info structure.
 *
 * This routine may be called at interrupt time by a nexus in
 * response to a hotplug event, therefore memory allocations are
 * not allowed to sleep.
 */
int
ndi_devi_alloc(dev_info_t *parent, char *node_name, pnode_t nodeid,
    dev_info_t **ret_dip)
{
	ASSERT(node_name != NULL);
	ASSERT(ret_dip != NULL);

	*ret_dip = i_ddi_alloc_node(parent, node_name, nodeid, -1, NULL,
	    KM_NOSLEEP);
	if (*ret_dip == NULL) {
		return (NDI_NOMEM);
	}

	return (NDI_SUCCESS);
}
/*
 * Allocate and initialize a new dev_info structure
 * This routine may sleep and should not be called at interrupt time
 */
void
ndi_devi_alloc_sleep(dev_info_t *parent, char *node_name, pnode_t nodeid,
    dev_info_t **ret_dip)
{
	ASSERT(node_name != NULL);
	ASSERT(ret_dip != NULL);

	*ret_dip = i_ddi_alloc_node(parent, node_name, nodeid, -1, NULL,
	    KM_SLEEP);
	ASSERT(*ret_dip);
}
/*
 * Remove an initialized (but not yet attached) dev_info
 * node from its parent.
 */
int
ndi_devi_free(dev_info_t *dip)
{
	ASSERT(dip != NULL);

	if (i_ddi_node_state(dip) >= DS_INITIALIZED)
		return (DDI_FAILURE);

	NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_free: %s%d (%p)\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip));

	(void) ddi_remove_child(dip, 0);

	return (NDI_SUCCESS);
}
/*
 * ndi_devi_bind_driver() binds a driver to a given device. If it fails
 * to bind the driver, it returns an appropriate error back. Some drivers
 * may want to know if they actually failed to bind.
 */
int
ndi_devi_bind_driver(dev_info_t *dip, uint_t flags)
{
	int ret = NDI_FAILURE;
	int circ;
	dev_info_t *pdip = ddi_get_parent(dip);

	NDI_CONFIG_DEBUG((CE_CONT,
	    "ndi_devi_bind_driver: %s%d (%p) flags: %x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	ndi_devi_enter(pdip, &circ);
	if (i_ndi_config_node(dip, DS_BOUND, flags) == DDI_SUCCESS)
		ret = NDI_SUCCESS;
	ndi_devi_exit(pdip, circ);

	return (ret);
}
/*
 * ndi_devi_unbind_driver: unbind the dip
 */
int
ndi_devi_unbind_driver(dev_info_t *dip)
{
	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	return (i_ndi_unconfig_node(dip, DS_LINKED, 0));
}
/*
 * Misc. help routines called by framework only
 */

/*
 * Get the state of node
 */
ddi_node_state_t
i_ddi_node_state(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_state);
}
/*
 * Set the state of node
 */
void
i_ddi_set_node_state(dev_info_t *dip, ddi_node_state_t state)
{
	DEVI(dip)->devi_node_state = state;
	membar_enter();			/* make sure stores are flushed */
}
/*
 * Determine if node is attached. The implementation accommodates transient
 * DS_READY->DS_ATTACHED->DS_READY state changes.  Outside this file, this
 * function should be used instead of i_ddi_node_state() DS_ATTACHED/DS_READY
 * state checks.
 */
int
i_ddi_devi_attached(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_state >= DS_ATTACHED);
}
/*
 * Common function for finding a node in a sibling list given name and addr.
 *
 * By default, name is matched with devi_node_name. The following
 * alternative match strategies are supported:
 *
 *	FIND_NODE_BY_NODENAME: Match on node name - typical use.
 *
 *	FIND_NODE_BY_DRIVER: A match on driver name bound to node is conducted.
 *		This support is used for support of OBP generic names and
 *		for the conversion from driver names to generic names. When
 *		more consistency in the generic name environment is achieved
 *		(and not needed for upgrade) this support can be removed.
 *
 *	FIND_NODE_BY_ADDR: Match on just the addr.
 *		This support is only used/needed during boot to match
 *		a node bound via a path-based driver alias.
 *
 * If a child is not named (dev_addr == NULL), there are three
 * possible actions:
 *
 *	(1) skip it
 *	(2) FIND_ADDR_BY_INIT: bring child to DS_INITIALIZED state
 *	(3) FIND_ADDR_BY_CALLBACK: use a caller-supplied callback function
 */
#define	FIND_NODE_BY_NODENAME	0x01
#define	FIND_NODE_BY_DRIVER	0x02
#define	FIND_NODE_BY_ADDR	0x04
#define	FIND_ADDR_BY_INIT	0x10
#define	FIND_ADDR_BY_CALLBACK	0x20
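
/*
 * Example (illustrative): matching a child "disk@0,0" under pdip while
 * naming unnamed siblings on demand, as find_child_by_name() below does:
 *
 *	dip = find_sibling(ddi_get_child(pdip), "disk", "0,0",
 *	    FIND_NODE_BY_NODENAME | FIND_ADDR_BY_INIT, NULL);
 */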
static dev_info_t *
find_sibling(dev_info_t *head, char *cname, char *caddr, uint_t flag,
    int (*callback)(dev_info_t *, char *, int))
{
	dev_info_t	*dip;
	char		*addr, *buf;
	major_t		major;
	uint_t		by;

	/* only one way to find a node */
	by = flag &
	    (FIND_NODE_BY_DRIVER | FIND_NODE_BY_NODENAME | FIND_NODE_BY_ADDR);
	ASSERT(by && BIT_ONLYONESET(by));

	/* only one way to name a node */
	ASSERT(((flag & FIND_ADDR_BY_INIT) == 0) ||
	    ((flag & FIND_ADDR_BY_CALLBACK) == 0));

	if (by == FIND_NODE_BY_DRIVER) {
		major = ddi_name_to_major(cname);
		if (major == DDI_MAJOR_T_NONE)
			return (NULL);
	}

	/* preallocate buffer of naming node by callback */
	if (flag & FIND_ADDR_BY_CALLBACK)
		buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);

	/*
	 * Walk the child list to find a match
	 */
	if (head == NULL)
		return (NULL);
	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(head)));
	for (dip = head; dip; dip = ddi_get_next_sibling(dip)) {
		if (by == FIND_NODE_BY_NODENAME) {
			/* match node name */
			if (strcmp(cname, DEVI(dip)->devi_node_name) != 0)
				continue;
		} else if (by == FIND_NODE_BY_DRIVER) {
			/* match driver major */
			if (DEVI(dip)->devi_major != major)
				continue;
		}

		if ((addr = DEVI(dip)->devi_addr) == NULL) {
			/* name the child based on the flag */
			if (flag & FIND_ADDR_BY_INIT) {
				if (ddi_initchild(ddi_get_parent(dip), dip)
				    != DDI_SUCCESS)
					continue;
				addr = DEVI(dip)->devi_addr;
			} else if (flag & FIND_ADDR_BY_CALLBACK) {
				if ((callback == NULL) || (callback(
				    dip, buf, MAXNAMELEN) != DDI_SUCCESS))
					continue;
				addr = buf;
			} else {
				continue;	/* skip */
			}
		}

		/* match addr */
		ASSERT(addr != NULL);
		if (strcmp(caddr, addr) == 0)
			break;	/* node found */
	}
	if (flag & FIND_ADDR_BY_CALLBACK)
		kmem_free(buf, MAXNAMELEN);
	return (dip);
}
/*
 * Find child of pdip with name: cname@caddr
 * Called by init_node() to look for duplicate nodes
 */
static dev_info_t *
find_duplicate_child(dev_info_t *pdip, dev_info_t *dip)
{
	dev_info_t *dup;
	char *cname = DEVI(dip)->devi_node_name;
	char *caddr = DEVI(dip)->devi_addr;

	/* search nodes before dip */
	dup = find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_NODENAME, NULL);
	if (dup != dip)
		return (dup);

	/*
	 * search nodes after dip; normally this is not needed,
	 * but could be because of reconfig boot.
	 */
	return (find_sibling(ddi_get_next_sibling(dip), cname, caddr,
	    FIND_NODE_BY_NODENAME, NULL));
}
/*
 * Find a child of a given name and address, using a callback to name
 * unnamed children. cname is the binding name.
 */
dev_info_t *
ndi_devi_findchild_by_callback(dev_info_t *pdip, char *dname, char *ua,
    int (*make_ua)(dev_info_t *, char *, int))
{
	int	by = FIND_ADDR_BY_CALLBACK;

	ASSERT(DEVI_BUSY_OWNED(pdip));
	by |= dname ? FIND_NODE_BY_DRIVER : FIND_NODE_BY_ADDR;
	return (find_sibling(ddi_get_child(pdip), dname, ua, by, make_ua));
}
/*
 * Find a child of a given name and address, invoking initchild to name
 * unnamed children. cname is the node name.
 */
static dev_info_t *
find_child_by_name(dev_info_t *pdip, char *cname, char *caddr)
{
	dev_info_t	*dip;

	/* attempt search without changing state of preceding siblings */
	dip = find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_NODENAME, NULL);
	if (dip)
		return (dip);

	return (find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_NODENAME|FIND_ADDR_BY_INIT, NULL));
}
/*
 * Find a child of a given driver name and address, invoking initchild
 * to name unnamed children. cname is the driver name.
 */
static dev_info_t *
find_child_by_driver(dev_info_t *pdip, char *cname, char *caddr)
{
	dev_info_t	*dip;

	/* attempt search without changing state of preceding siblings */
	dip = find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_DRIVER, NULL);
	if (dip)
		return (dip);

	return (find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_DRIVER|FIND_ADDR_BY_INIT, NULL));
}
/*
 * Find a child of a given address, invoking initchild to name
 * unnamed children.
 *
 * NOTE: This function is only used during boot. One would hope that
 * unique sibling unit-addresses on hardware branches of the tree would
 * be a requirement to avoid two drivers trying to control the same
 * piece of hardware. Unfortunately there are some cases where this
 * situation exists (/ssm@0,0/pci@1c,700000 /ssm@0,0/sghsc@1c,700000).
 * Until unit-address uniqueness of siblings is guaranteed, use of this
 * interface for purposes other than boot should be avoided.
 */
static dev_info_t *
find_child_by_addr(dev_info_t *pdip, char *caddr)
{
	dev_info_t	*dip;

	/* return NULL if called without a unit-address */
	if ((caddr == NULL) || (*caddr == '\0'))
		return (NULL);

	/* attempt search without changing state of preceding siblings */
	dip = find_sibling(ddi_get_child(pdip), NULL, caddr,
	    FIND_NODE_BY_ADDR, NULL);
	if (dip)
		return (dip);

	return (find_sibling(ddi_get_child(pdip), NULL, caddr,
	    FIND_NODE_BY_ADDR|FIND_ADDR_BY_INIT, NULL));
}
/*
 * Deleting a property list. Take care, since some property structures
 * may not be fully built.
 */
void
i_ddi_prop_list_delete(ddi_prop_t *prop)
{
	while (prop) {
		ddi_prop_t *next = prop->prop_next;
		if (prop->prop_name)
			kmem_free(prop->prop_name, strlen(prop->prop_name) + 1);
		if ((prop->prop_len != 0) && prop->prop_val)
			kmem_free(prop->prop_val, prop->prop_len);
		kmem_free(prop, sizeof (struct ddi_prop));
		prop = next;
	}
}
/*
 * Duplicate property list
 */
ddi_prop_t *
i_ddi_prop_list_dup(ddi_prop_t *prop, uint_t flag)
{
	ddi_prop_t *result, *prev, *copy;

	if (prop == NULL)
		return (NULL);

	result = prev = NULL;
	for (; prop != NULL; prop = prop->prop_next) {
		ASSERT(prop->prop_name != NULL);
		copy = kmem_zalloc(sizeof (struct ddi_prop), flag);
		if (copy == NULL)
			goto fail;

		copy->prop_dev = prop->prop_dev;
		copy->prop_flags = prop->prop_flags;
		copy->prop_name = i_ddi_strdup(prop->prop_name, flag);
		if (copy->prop_name == NULL)
			goto fail;

		if ((copy->prop_len = prop->prop_len) != 0) {
			copy->prop_val = kmem_zalloc(prop->prop_len, flag);
			if (copy->prop_val == NULL)
				goto fail;

			bcopy(prop->prop_val, copy->prop_val, prop->prop_len);
		}

		if (prev == NULL) {
			result = prev = copy;
		} else {
			prev->prop_next = copy;
			prev = copy;
		}
	}
	return (result);

fail:
	i_ddi_prop_list_delete(result);
	return (NULL);
}
/*
 * Create a reference property list, currently used only for
 * driver global properties. Created with ref count of 1.
 */
ddi_prop_list_t *
i_ddi_prop_list_create(ddi_prop_t *props)
{
	ddi_prop_list_t *list = kmem_alloc(sizeof (*list), KM_SLEEP);
	list->prop_list = props;
	list->prop_ref = 1;
	return (list);
}
/*
 * Increment/decrement reference count. The reference is
 * protected by dn_lock. The only interfaces modifying
 * dn_global_prop_ptr is in impl_make[free]_parlist().
 */
void
i_ddi_prop_list_hold(ddi_prop_list_t *prop_list, struct devnames *dnp)
{
	ASSERT(prop_list->prop_ref >= 0);
	ASSERT(mutex_owned(&dnp->dn_lock));
	prop_list->prop_ref++;
}

void
i_ddi_prop_list_rele(ddi_prop_list_t *prop_list, struct devnames *dnp)
{
	ASSERT(prop_list->prop_ref > 0);
	ASSERT(mutex_owned(&dnp->dn_lock));
	prop_list->prop_ref--;

	if (prop_list->prop_ref == 0) {
		i_ddi_prop_list_delete(prop_list->prop_list);
		kmem_free(prop_list, sizeof (*prop_list));
	}
}
/*
 * Free table of classes by drivers
 */
void
i_ddi_free_exported_classes(char **classes, int n)
{
	if ((n == 0) || (classes == NULL))
		return;

	kmem_free(classes, n * sizeof (char *));
}
/*
 * Get all classes exported by dip
 */
int
i_ddi_get_exported_classes(dev_info_t *dip, char ***classes)
{
	extern void lock_hw_class_list();
	extern void unlock_hw_class_list();
	extern int get_class(const char *, char **);

	static char *rootclass = "root";
	int n = 0, nclass = 0;
	char **buf;

	ASSERT(i_ddi_node_state(dip) >= DS_BOUND);

	if (dip == ddi_root_node())	/* rootnode exports class "root" */
		nclass = 1;
	lock_hw_class_list();
	nclass += get_class(ddi_driver_name(dip), NULL);
	if (nclass == 0) {
		unlock_hw_class_list();
		return (0);		/* no class exported */
	}

	*classes = buf = kmem_alloc(nclass * sizeof (char *), KM_SLEEP);
	if (dip == ddi_root_node()) {
		*buf++ = rootclass;
		n = 1;
	}
	n += get_class(ddi_driver_name(dip), buf);
	unlock_hw_class_list();

	ASSERT(n == nclass);	/* make sure buf wasn't overrun */
	return (nclass);
}
/*
 * Helper functions, returns NULL if no memory.
 */
char *
i_ddi_strdup(char *str, uint_t flag)
{
	char *copy;

	if (str == NULL)
		return (NULL);

	copy = kmem_alloc(strlen(str) + 1, flag);
	if (copy == NULL)
		return (NULL);

	(void) strcpy(copy, str);
	return (copy);
}
/*
 * Load driver.conf file for major. Load all if major == -1.
 *
 * This is called
 * - early in boot after devnames array is initialized
 * - from vfs code when certain file systems are mounted
 * - from add_drv when a new driver is added
 */
int
i_ddi_load_drvconf(major_t major)
{
	extern int modrootloaded;

	major_t low, high, m;

	if (major == DDI_MAJOR_T_NONE) {
		low = 0;
		high = devcnt - 1;
	} else {
		if (major >= devcnt)
			return (EINVAL);
		low = high = major;
	}

	for (m = low; m <= high; m++) {
		struct devnames *dnp = &devnamesp[m];
		LOCK_DEV_OPS(&dnp->dn_lock);
		dnp->dn_flags &= ~(DN_DRIVER_HELD|DN_DRIVER_INACTIVE);
		(void) impl_make_parlist(m);
		UNLOCK_DEV_OPS(&dnp->dn_lock);
	}

	if (modrootloaded) {
		ddi_walk_devs(ddi_root_node(), reset_nexus_flags,
		    (void *)(uintptr_t)major);
	}

	/* build dn_list from old entries in path_to_inst */
	e_ddi_unorphan_instance_nos();
	return (0);
}
/*
 * Unload a specific driver.conf.
 * Don't support unload all because it doesn't make any sense
 */
int
i_ddi_unload_drvconf(major_t major)
{
	int error;
	struct devnames *dnp;

	if (major >= devcnt)
		return (EINVAL);

	/*
	 * Take the per-driver lock while unloading driver.conf
	 */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	error = impl_free_parlist(major);
	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return (error);
}
/*
 * Merge a .conf node. This is called by nexus drivers to augment
 * hw node with properties specified in driver.conf file. This function
 * takes a callback routine to name nexus children.
 * The parent node must be held busy.
 *
 * It returns DDI_SUCCESS if the node is merged and DDI_FAILURE otherwise.
 */
int
ndi_merge_node(dev_info_t *dip, int (*make_ua)(dev_info_t *, char *, int))
{
	dev_info_t *hwdip;

	ASSERT(ndi_dev_is_persistent_node(dip) == 0);
	ASSERT(ddi_get_name_addr(dip) != NULL);

	hwdip = ndi_devi_findchild_by_callback(ddi_get_parent(dip),
	    ddi_binding_name(dip), ddi_get_name_addr(dip), make_ua);

	/*
	 * Look for the hardware node that is the target of the merge;
	 * return failure if not found.
	 */
	if ((hwdip == NULL) || (hwdip == dip)) {
		char *buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		NDI_CONFIG_DEBUG((CE_WARN, "No HW node to merge conf node %s",
		    ddi_deviname(dip, buf)));
		kmem_free(buf, MAXNAMELEN);
		return (DDI_FAILURE);
	}

	/*
	 * Make sure the hardware node is uninitialized and has no property.
	 * This may not be the case if new .conf files are loaded after some
	 * hardware nodes have already been initialized and attached.
	 *
	 * N.B. We return success here because the node was *intended*
	 * to be a merge node because there is a hw node with the name.
	 */
	mutex_enter(&DEVI(hwdip)->devi_lock);
	if (ndi_dev_is_persistent_node(hwdip) == 0) {
		char *buf;
		mutex_exit(&DEVI(hwdip)->devi_lock);

		buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		NDI_CONFIG_DEBUG((CE_NOTE, "Duplicate .conf node %s",
		    ddi_deviname(dip, buf)));
		kmem_free(buf, MAXNAMELEN);
		return (DDI_SUCCESS);
	}

	/*
	 * If it is possible that the hardware has already been touched,
	 * then don't merge.
	 */
	if (i_ddi_node_state(hwdip) >= DS_INITIALIZED ||
	    (DEVI(hwdip)->devi_sys_prop_ptr != NULL) ||
	    (DEVI(hwdip)->devi_drv_prop_ptr != NULL)) {
		char *buf;
		mutex_exit(&DEVI(hwdip)->devi_lock);

		buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		NDI_CONFIG_DEBUG((CE_NOTE,
		    "!Cannot merge .conf node %s with hw node %p "
		    "-- not in proper state",
		    ddi_deviname(dip, buf), (void *)hwdip));
		kmem_free(buf, MAXNAMELEN);
		return (DDI_SUCCESS);
	}

	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(hwdip)->devi_sys_prop_ptr = DEVI(dip)->devi_sys_prop_ptr;
	DEVI(hwdip)->devi_drv_prop_ptr = DEVI(dip)->devi_drv_prop_ptr;
	DEVI(dip)->devi_sys_prop_ptr = NULL;
	DEVI(dip)->devi_drv_prop_ptr = NULL;
	mutex_exit(&DEVI(dip)->devi_lock);
	mutex_exit(&DEVI(hwdip)->devi_lock);

	return (DDI_SUCCESS);
}
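
/*
 * Example (illustrative): with a hardware node "foo@1,0" already present
 * under the nexus, a driver.conf entry such as
 *
 *	name="foo" parent="/pci@0,0" unit-address="1,0" extra-prop=5;
 *
 * produces a .conf node that is not a device of its own; the merge above
 * moves its properties (here "extra-prop") onto the existing foo@1,0 node.
 * The names used are assumptions for illustration only.
 */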
/*
 * Merge a "wildcard" .conf node. This is called by nexus drivers to
 * augment a set of hw nodes with properties specified in a driver.conf file.
 * The parent node must be held busy.
 *
 * There is no failure mode, since the nexus may or may not have child
 * nodes bound to the driver specified by the wildcard node.
 */
void
ndi_merge_wildcard_node(dev_info_t *dip)
{
	dev_info_t *hwdip;
	dev_info_t *pdip = ddi_get_parent(dip);
	major_t major = ddi_driver_major(dip);

	/* never attempt to merge a hw node */
	ASSERT(ndi_dev_is_persistent_node(dip) == 0);
	/* must be bound to a driver major number */
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Walk the child list to find all nodes bound to major
	 * and copy properties.
	 */
	mutex_enter(&DEVI(dip)->devi_lock);
	ASSERT(DEVI_BUSY_OWNED(pdip));
	for (hwdip = ddi_get_child(pdip); hwdip;
	    hwdip = ddi_get_next_sibling(hwdip)) {
		/*
		 * Skip nodes not bound to same driver
		 */
		if (ddi_driver_major(hwdip) != major)
			continue;

		/*
		 * Skip .conf nodes
		 */
		if (ndi_dev_is_persistent_node(hwdip) == 0)
			continue;

		/*
		 * Make sure the node is uninitialized and has no property.
		 */
		mutex_enter(&DEVI(hwdip)->devi_lock);
		if (i_ddi_node_state(hwdip) >= DS_INITIALIZED ||
		    (DEVI(hwdip)->devi_sys_prop_ptr != NULL) ||
		    (DEVI(hwdip)->devi_drv_prop_ptr != NULL)) {
			mutex_exit(&DEVI(hwdip)->devi_lock);
			NDI_CONFIG_DEBUG((CE_NOTE, "HW node %p state not "
			    "suitable for merging wildcard conf node %s",
			    (void *)hwdip, ddi_node_name(dip)));
			continue;
		}

		DEVI(hwdip)->devi_sys_prop_ptr =
		    i_ddi_prop_list_dup(DEVI(dip)->devi_sys_prop_ptr, KM_SLEEP);
		DEVI(hwdip)->devi_drv_prop_ptr =
		    i_ddi_prop_list_dup(DEVI(dip)->devi_drv_prop_ptr, KM_SLEEP);
		mutex_exit(&DEVI(hwdip)->devi_lock);
	}
	mutex_exit(&DEVI(dip)->devi_lock);
}
/*
 * Return the major number based on the compatible property. This interface
 * may be used in situations where we are trying to detect if a better driver
 * now exists for a device, so it must use the 'compatible' property.  If
 * a non-NULL formp is specified and the binding was based on compatible then
 * return the pointer to the form used in *formp.
 */
major_t
ddi_compatible_driver_major(dev_info_t *dip, char **formp)
{
	struct dev_info *devi = DEVI(dip);
	void		*compat;
	size_t		len;
	char		*p = NULL;
	major_t		major = DDI_MAJOR_T_NONE;

	if (formp)
		*formp = NULL;

	if (ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "ddi-assigned")) {
		major = ddi_name_to_major("nulldriver");
		return (major);
	}

	/*
	 * Highest precedence binding is a path-oriented alias. Since this
	 * requires a 'path', this type of binding occurs via more obtuse
	 * 'rebind'. The need for a path-oriented alias 'rebind' is detected
	 * after a successful DDI_CTLOPS_INITCHILD to another driver: this is
	 * the first point at which the unit-address (or instance) of the
	 * last component of the path is available (even though the path is
	 * bound to the wrong driver at this point).
	 */
	if (devi->devi_flags & DEVI_REBIND) {
		p = devi->devi_rebinding_name;
		major = ddi_name_to_major(p);
		if (driver_active(major)) {
			if (formp)
				*formp = p;
			return (major);
		}

		/*
		 * If for some reason devi_rebinding_name no longer resolves
		 * to a proper driver then clear DEVI_REBIND.
		 */
		mutex_enter(&devi->devi_lock);
		devi->devi_flags &= ~DEVI_REBIND;
		mutex_exit(&devi->devi_lock);
	}

	/* look up compatible property */
	(void) lookup_compatible(dip, KM_SLEEP);
	compat = (void *)(devi->devi_compat_names);
	len = devi->devi_compat_length;

	/* find the highest precedence compatible form with a driver binding */
	while ((p = prom_decode_composite_string(compat, len, p)) != NULL) {
		major = ddi_name_to_major(p);
		if (driver_active(major)) {
			if (formp)
				*formp = p;
			return (major);
		}
	}

	/*
	 * none of the compatible forms have a driver binding, see if
	 * the node name has a driver binding.
	 */
	major = ddi_name_to_major(ddi_node_name(dip));
	if (driver_active(major))
		return (major);

	/* no driver */
	return (DDI_MAJOR_T_NONE);
}
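
/*
 * Example (illustrative): for a node whose compatible property is
 *
 *	compatible = "pci8086,1229.1028.1", "pci8086,1229", "pciclass,020000"
 *
 * the first form with an active driver binding wins; only when none of the
 * compatible forms resolve is the node name itself tried, and a pending
 * path-oriented alias rebind takes precedence over all of them. The
 * identifiers above are assumptions used for illustration.
 */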
/*
 * Static help functions
 */

/*
 * lookup the "compatible" property and cache its contents in the
 * device node.
 */
static int
lookup_compatible(dev_info_t *dip, uint_t flag)
{
	int rv;
	int prop_flags;
	uint_t ncompatstrs;
	char **compatstrpp;
	char *di_compat_strp;
	size_t di_compat_strlen;

	if (DEVI(dip)->devi_compat_names) {
		return (DDI_SUCCESS);
	}

	prop_flags = DDI_PROP_TYPE_STRING | DDI_PROP_DONTPASS;

	if (flag & KM_NOSLEEP) {
		prop_flags |= DDI_PROP_DONTSLEEP;
	}

	if (ndi_dev_is_prom_node(dip) == 0) {
		prop_flags |= DDI_PROP_NOTPROM;
	}

	rv = ddi_prop_lookup_common(DDI_DEV_T_ANY, dip, prop_flags,
	    "compatible", &compatstrpp, &ncompatstrs,
	    ddi_prop_fm_decode_strings);

	if (rv == DDI_PROP_NOT_FOUND) {
		return (DDI_SUCCESS);
	}

	if (rv != DDI_PROP_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * encode the compatible property data in the dev_info node
	 */
	rv = DDI_SUCCESS;
	if (ncompatstrs != 0) {
		di_compat_strp = encode_composite_string(compatstrpp,
		    ncompatstrs, &di_compat_strlen, flag);
		if (di_compat_strp != NULL) {
			DEVI(dip)->devi_compat_names = di_compat_strp;
			DEVI(dip)->devi_compat_length = di_compat_strlen;
		} else {
			rv = DDI_FAILURE;
		}
	}
	ddi_prop_free(compatstrpp);
	return (rv);
}
/*
 * Create a composite string from a list of strings.
 *
 * A composite string consists of a single buffer containing one
 * or more NULL terminated strings.
 */
static char *
encode_composite_string(char **strings, uint_t nstrings, size_t *retsz,
    uint_t flag)
{
	uint_t index;
	char  **strpp;
	uint_t slen;
	size_t cbuf_sz = 0;
	char *cbuf_p;
	char *cbuf_ip;

	if (strings == NULL || nstrings == 0 || retsz == NULL) {
		return (NULL);
	}

	for (index = 0, strpp = strings; index < nstrings; index++)
		cbuf_sz += strlen(*(strpp++)) + 1;

	if ((cbuf_p = kmem_alloc(cbuf_sz, flag)) == NULL) {
		cmn_err(CE_NOTE,
		    "?failed to allocate device node compatstr");
		return (NULL);
	}

	cbuf_ip = cbuf_p;
	for (index = 0, strpp = strings; index < nstrings; index++) {
		slen = strlen(*strpp);
		bcopy(*(strpp++), cbuf_ip, slen);
		cbuf_ip += slen;
		*(cbuf_ip++) = '\0';
	}

	*retsz = cbuf_sz;
	return (cbuf_p);
}
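
/*
 * Example (illustrative): encoding the two strings "pci8086,1229" and
 * "pciclass,020000" yields one 29-byte buffer laid out as
 *
 *	p c i 8 0 8 6 , 1 2 2 9 \0 p c i c l a s s , 0 2 0 0 0 0 \0
 *
 * with *retsz set to the total size, including both terminating NUL bytes.
 */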
static void
link_to_driver_list(dev_info_t *dip)
{
	major_t major = DEVI(dip)->devi_major;
	struct devnames *dnp;

	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Remove from orphan list
	 */
	if (ndi_dev_is_persistent_node(dip)) {
		dnp = &orphanlist;
		remove_from_dn_list(dnp, dip);
	}

	/*
	 * Add to per driver list
	 */
	dnp = &devnamesp[major];
	add_to_dn_list(dnp, dip);
}
static void
unlink_from_driver_list(dev_info_t *dip)
{
	major_t major = DEVI(dip)->devi_major;
	struct devnames *dnp;

	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Remove from per-driver list
	 */
	dnp = &devnamesp[major];
	remove_from_dn_list(dnp, dip);

	/*
	 * Add to orphan list
	 */
	if (ndi_dev_is_persistent_node(dip)) {
		dnp = &orphanlist;
		add_to_dn_list(dnp, dip);
	}
}
/*
 * scan the per-driver list looking for dev_info "dip"
 */
static dev_info_t *
in_dn_list(struct devnames *dnp, dev_info_t *dip)
{
	struct dev_info *idevi;

	if ((idevi = DEVI(dnp->dn_head)) == NULL)
		return (NULL);

	while (idevi) {
		if (idevi == DEVI(dip))
			return (dip);
		idevi = idevi->devi_next;
	}
	return (NULL);
}
/*
 * insert devinfo node 'dip' into the per-driver instance list
 * headed by 'dnp'
 *
 * Nodes on the per-driver list are ordered: HW - SID - PSEUDO.  The order is
 * required for merging of .conf file data to work properly.
 */
static void
add_to_ordered_dn_list(struct devnames *dnp, dev_info_t *dip)
{
	dev_info_t **dipp;

	ASSERT(mutex_owned(&(dnp->dn_lock)));

	dipp = &dnp->dn_head;
	if (ndi_dev_is_prom_node(dip)) {
		/*
		 * Find the first non-prom node or end of list
		 */
		while (*dipp && (ndi_dev_is_prom_node(*dipp) != 0)) {
			dipp = (dev_info_t **)&DEVI(*dipp)->devi_next;
		}
	} else if (ndi_dev_is_persistent_node(dip)) {
		/*
		 * Find the first non-persistent node
		 */
		while (*dipp && (ndi_dev_is_persistent_node(*dipp) != 0)) {
			dipp = (dev_info_t **)&DEVI(*dipp)->devi_next;
		}
	} else {
		/*
		 * Find the end of the list
		 */
		while (*dipp) {
			dipp = (dev_info_t **)&DEVI(*dipp)->devi_next;
		}
	}

	/* insert node at end of list of nodes of same type */
	DEVI(dip)->devi_next = DEVI(*dipp);
	*dipp = dip;
}
/*
 * add a list of device nodes to the device node list in the
 * devnames structure
 */
static void
add_to_dn_list(struct devnames *dnp, dev_info_t *dip)
{
	/*
	 * Look to see if node already exists
	 */
	LOCK_DEV_OPS(&(dnp->dn_lock));
	if (in_dn_list(dnp, dip)) {
		cmn_err(CE_NOTE, "add_to_dn_list: node %s already in list",
		    DEVI(dip)->devi_node_name);
	} else {
		add_to_ordered_dn_list(dnp, dip);
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));
}
static void
remove_from_dn_list(struct devnames *dnp, dev_info_t *dip)
{
	dev_info_t **plist;

	LOCK_DEV_OPS(&(dnp->dn_lock));

	plist = (dev_info_t **)&dnp->dn_head;
	while (*plist && (*plist != dip)) {
		plist = (dev_info_t **)&DEVI(*plist)->devi_next;
	}

	if (*plist != NULL) {
		ASSERT(*plist == dip);
		*plist = (dev_info_t *)(DEVI(dip)->devi_next);
		DEVI(dip)->devi_next = NULL;
	} else {
		NDI_CONFIG_DEBUG((CE_NOTE,
		    "remove_from_dn_list: node %s not found in list",
		    DEVI(dip)->devi_node_name));
	}

	UNLOCK_DEV_OPS(&(dnp->dn_lock));
}
/*
 * Add and remove reference driver global property list
 */
static void
add_global_props(dev_info_t *dip)
{
	struct devnames		*dnp;
	ddi_prop_list_t		*plist;

	ASSERT(DEVI(dip)->devi_global_prop_list == NULL);
	ASSERT(DEVI(dip)->devi_major != DDI_MAJOR_T_NONE);

	dnp = &devnamesp[DEVI(dip)->devi_major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	plist = dnp->dn_global_prop_ptr;
	if (plist == NULL) {
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return;
	}
	i_ddi_prop_list_hold(plist, dnp);
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_global_prop_list = plist;
	mutex_exit(&DEVI(dip)->devi_lock);
}
static void
remove_global_props(dev_info_t *dip)
{
	ddi_prop_list_t *proplist;

	mutex_enter(&DEVI(dip)->devi_lock);
	proplist = DEVI(dip)->devi_global_prop_list;
	DEVI(dip)->devi_global_prop_list = NULL;
	mutex_exit(&DEVI(dip)->devi_lock);

	if (proplist) {
		major_t major;
		struct devnames	*dnp;

		major = ddi_driver_major(dip);
		ASSERT(major != DDI_MAJOR_T_NONE);
		dnp = &devnamesp[major];
		LOCK_DEV_OPS(&dnp->dn_lock);
		i_ddi_prop_list_rele(proplist, dnp);
		UNLOCK_DEV_OPS(&dnp->dn_lock);
	}
}
/*
 * Set this variable to '0' to disable the optimization,
 * and to 2 to print debug message.
 */
static int optimize_dtree = 1;

#ifdef	DEBUG
static void
debug_dtree(dev_info_t *devi, struct dev_info *adevi, char *service)
{
	char *adeviname, *buf;

	/*
	 * Don't print unless optimize dtree is set to 2+
	 */
	if (optimize_dtree <= 1)
		return;

	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	adeviname = ddi_deviname((dev_info_t *)adevi, buf);
	if (*adeviname == '\0')
		adeviname = "root";

	cmn_err(CE_CONT, "%s %s -> %s\n",
	    ddi_deviname(devi, buf), service, adeviname);

	kmem_free(buf, MAXNAMELEN);
}
#else /* DEBUG */
#define	debug_dtree(a1, a2, a3)	 /* nothing */
#endif	/* DEBUG */
static void
ddi_optimize_dtree(dev_info_t *devi)
{
	struct dev_info *pdevi;
	struct bus_ops *b;

	pdevi = DEVI(devi)->devi_parent;
	ASSERT(pdevi);

	/*
	 * Set the unoptimized values
	 */
	DEVI(devi)->devi_bus_map_fault = pdevi;
	DEVI(devi)->devi_bus_dma_allochdl = pdevi;
	DEVI(devi)->devi_bus_dma_freehdl = pdevi;
	DEVI(devi)->devi_bus_dma_bindhdl = pdevi;
	DEVI(devi)->devi_bus_dma_bindfunc =
	    pdevi->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	DEVI(devi)->devi_bus_dma_unbindhdl = pdevi;
	DEVI(devi)->devi_bus_dma_unbindfunc =
	    pdevi->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	DEVI(devi)->devi_bus_dma_flush = pdevi;
	DEVI(devi)->devi_bus_dma_win = pdevi;
	DEVI(devi)->devi_bus_dma_ctl = pdevi;
	DEVI(devi)->devi_bus_ctl = pdevi;

	if (optimize_dtree == 0)
		return;

	b = pdevi->devi_ops->devo_bus_ops;

	if (i_ddi_map_fault == b->bus_map_fault) {
		DEVI(devi)->devi_bus_map_fault = pdevi->devi_bus_map_fault;
		debug_dtree(devi, DEVI(devi)->devi_bus_map_fault,
		    "bus_map_fault");
	}

	if (ddi_dma_allochdl == b->bus_dma_allochdl) {
		DEVI(devi)->devi_bus_dma_allochdl =
		    pdevi->devi_bus_dma_allochdl;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_allochdl,
		    "bus_dma_allochdl");
	}

	if (ddi_dma_freehdl == b->bus_dma_freehdl) {
		DEVI(devi)->devi_bus_dma_freehdl = pdevi->devi_bus_dma_freehdl;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_freehdl,
		    "bus_dma_freehdl");
	}

	if (ddi_dma_bindhdl == b->bus_dma_bindhdl) {
		DEVI(devi)->devi_bus_dma_bindhdl = pdevi->devi_bus_dma_bindhdl;
		DEVI(devi)->devi_bus_dma_bindfunc =
		    pdevi->devi_bus_dma_bindhdl->devi_ops->
		    devo_bus_ops->bus_dma_bindhdl;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_bindhdl,
		    "bus_dma_bindhdl");
	}

	if (ddi_dma_unbindhdl == b->bus_dma_unbindhdl) {
		DEVI(devi)->devi_bus_dma_unbindhdl =
		    pdevi->devi_bus_dma_unbindhdl;
		DEVI(devi)->devi_bus_dma_unbindfunc =
		    pdevi->devi_bus_dma_unbindhdl->devi_ops->
		    devo_bus_ops->bus_dma_unbindhdl;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_unbindhdl,
		    "bus_dma_unbindhdl");
	}

	if (ddi_dma_flush == b->bus_dma_flush) {
		DEVI(devi)->devi_bus_dma_flush = pdevi->devi_bus_dma_flush;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_flush,
		    "bus_dma_flush");
	}

	if (ddi_dma_win == b->bus_dma_win) {
		DEVI(devi)->devi_bus_dma_win = pdevi->devi_bus_dma_win;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_win,
		    "bus_dma_win");
	}

	if (ddi_dma_mctl == b->bus_dma_ctl) {
		DEVI(devi)->devi_bus_dma_ctl = pdevi->devi_bus_dma_ctl;
		debug_dtree(devi, DEVI(devi)->devi_bus_dma_ctl, "bus_dma_ctl");
	}

	if (ddi_ctlops == b->bus_ctl) {
		DEVI(devi)->devi_bus_ctl = pdevi->devi_bus_ctl;
		debug_dtree(devi, DEVI(devi)->devi_bus_ctl, "bus_ctl");
	}
}
#define	MIN_DEVINFO_LOG_SIZE	max_ncpus
#define	MAX_DEVINFO_LOG_SIZE	max_ncpus * 10

static void
da_log_init(void)
{
	devinfo_log_header_t *dh;
	int logsize = devinfo_log_size;

	if (logsize == 0)
		logsize = MIN_DEVINFO_LOG_SIZE;
	else if (logsize > MAX_DEVINFO_LOG_SIZE)
		logsize = MAX_DEVINFO_LOG_SIZE;

	dh = kmem_alloc(logsize * PAGESIZE, KM_SLEEP);
	mutex_init(&dh->dh_lock, NULL, MUTEX_DEFAULT, NULL);
	dh->dh_max = ((logsize * PAGESIZE) - sizeof (*dh)) /
	    sizeof (devinfo_audit_t) + 1;
	dh->dh_curr = -1;
	dh->dh_hits = 0;
	dh->dh_logsize = logsize;

	devinfo_audit_log = dh;
}
/*
 * Log the stack trace in per-devinfo audit structure and also enter
 * it into a system wide log for recording the time history.
 */
static void
da_log_enter(dev_info_t *dip)
{
	devinfo_audit_t *da_log, *da = DEVI(dip)->devi_audit;
	devinfo_log_header_t *dh = devinfo_audit_log;

	if (devinfo_audit_log == NULL)
		return;

	ASSERT(da != NULL);

	da->da_devinfo = dip;
	da->da_timestamp = gethrtime();
	da->da_thread = curthread;
	da->da_node_state = DEVI(dip)->devi_node_state;
	da->da_device_state = DEVI(dip)->devi_state;
	da->da_depth = getpcstack(da->da_stack, DDI_STACK_DEPTH);

	/*
	 * Copy into common log and note the location for tracing history
	 */
	mutex_enter(&dh->dh_lock);
	dh->dh_hits++;
	dh->dh_curr++;
	if (dh->dh_curr >= dh->dh_max)
		dh->dh_curr -= dh->dh_max;
	da_log = &dh->dh_entry[dh->dh_curr];
	mutex_exit(&dh->dh_lock);

	bcopy(da, da_log, sizeof (devinfo_audit_t));
	da->da_lastlog = da_log;
}
static void
attach_drivers()
{
	int i;

	for (i = 0; i < devcnt; i++) {
		struct devnames *dnp = &devnamesp[i];
		if ((dnp->dn_flags & DN_FORCE_ATTACH) &&
		    (ddi_hold_installed_driver((major_t)i) != NULL))
			ddi_rele_driver((major_t)i);
	}
}

/*
 * Launch a thread to force attach drivers. This avoids penalty on boot time.
 */
void
i_ddi_forceattach_drivers()
{

	/*
	 * Attach IB VHCI driver before the force-attach thread attaches the
	 * IB HCA driver. IB HCA driver will fail if IB Nexus has not yet
	 * been attached.
	 */
	(void) ddi_hold_installed_driver(ddi_name_to_major("ib"));

	(void) thread_create(NULL, 0, (void (*)())attach_drivers, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
/*
 * This is a private DDI interface for optimizing boot performance.
 * I/O subsystem initialization is considered complete when devfsadm
 * is executed.
 *
 * NOTE: The start of syseventd happens to be a convenient indicator
 *	of the completion of I/O initialization during boot.
 *	The implementation should be replaced by something more robust.
 */
int
i_ddi_io_initialized()
{
	extern int sysevent_daemon_init;
	return (sysevent_daemon_init);
}
/*
 * May be used to determine system boot state
 * "Available" means the system is for the most part up
 * and initialized, with all system services either up or
 * capable of being started.  This state is set by devfsadm
 * during the boot process.  The /dev filesystem infers
 * from this when implicit reconfig can be performed,
 * ie, devfsadm can be invoked. Please avoid making
 * further use of this unless it's really necessary.
 */
int
i_ddi_sysavail()
{
	return (devname_state & DS_SYSAVAIL);
}

/*
 * May be used to determine if boot is a reconfigure boot.
 */
int
i_ddi_reconfig()
{
	return (devname_state & DS_RECONFIG);
}
/*
 * Note system services are up, inform /dev.
 */
void
i_ddi_set_sysavail()
{
	if ((devname_state & DS_SYSAVAIL) == 0) {
		devname_state |= DS_SYSAVAIL;
		sdev_devstate_change();
	}
}

/*
 * Note reconfiguration boot, inform /dev.
 */
void
i_ddi_set_reconfig()
{
	if ((devname_state & DS_RECONFIG) == 0) {
		devname_state |= DS_RECONFIG;
		sdev_devstate_change();
	}
}
/*
 * device tree walking
 */

struct walk_elem {
	struct walk_elem *next;
	dev_info_t *dip;
};

static void
free_list(struct walk_elem *list)
{
	while (list) {
		struct walk_elem *next = list->next;
		kmem_free(list, sizeof (*list));
		list = next;
	}
}

static void
append_node(struct walk_elem **list, dev_info_t *dip)
{
	struct walk_elem *tail;
	struct walk_elem *elem = kmem_alloc(sizeof (*elem), KM_SLEEP);

	elem->next = NULL;
	elem->dip = dip;

	if (*list == NULL) {
		*list = elem;
		return;
	}

	tail = *list;
	while (tail->next)
		tail = tail->next;

	tail->next = elem;
}
/*
 * The implementation of ddi_walk_devs().
 */
static int
walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg,
    int do_locking)
{
	struct walk_elem *head = NULL;

	/*
	 * Do it in two passes. First pass invoke callback on each
	 * dip on the sibling list. Second pass invoke callback on
	 * children of each dip.
	 */
	while (dip) {
		switch ((*f)(dip, arg)) {
		case DDI_WALK_TERMINATE:
			free_list(head);
			return (DDI_WALK_TERMINATE);

		case DDI_WALK_PRUNESIB:
			/* ignore sibling by setting dip to NULL */
			append_node(&head, dip);
			dip = NULL;
			break;

		case DDI_WALK_PRUNECHILD:
			/* don't worry about children */
			dip = ddi_get_next_sibling(dip);
			break;

		case DDI_WALK_CONTINUE:
		default:
			append_node(&head, dip);
			dip = ddi_get_next_sibling(dip);
			break;
		}
	}

	/* second pass */
	while (head) {
		int circ;
		struct walk_elem *next = head->next;

		if (do_locking)
			ndi_devi_enter(head->dip, &circ);
		if (walk_devs(ddi_get_child(head->dip), f, arg, do_locking) ==
		    DDI_WALK_TERMINATE) {
			if (do_locking)
				ndi_devi_exit(head->dip, circ);
			free_list(head);
			return (DDI_WALK_TERMINATE);
		}
		if (do_locking)
			ndi_devi_exit(head->dip, circ);
		kmem_free(head, sizeof (*head));
		head = next;
	}

	return (DDI_WALK_CONTINUE);
}
/*
 * This general-purpose routine traverses the tree of dev_info nodes,
 * starting from the given node, and calls the given function for each
 * node that it finds with the current node and the pointer arg (which
 * can point to a structure of information that the function
 * needs) as arguments.
 *
 * It does the walk a layer at a time, not depth-first. The given function
 * must return one of the following values:
 *	DDI_WALK_CONTINUE
 *	DDI_WALK_PRUNESIB
 *	DDI_WALK_PRUNECHILD
 *	DDI_WALK_TERMINATE
 *
 * N.B. Since we walk the sibling list, the caller must ensure that
 *	the parent of dip is held against changes, unless the parent
 *	is rootnode.  ndi_devi_enter() on the parent is sufficient.
 *
 *	To avoid deadlock situations, caller must not attempt to
 *	configure/unconfigure/remove device node in (*f)(), nor should
 *	it attempt to recurse on other nodes in the system. Any
 *	ndi_devi_enter() done by (*f)() must occur 'at-or-below' the
 *	node entered prior to ddi_walk_devs(). Furthermore, if (*f)()
 *	does any multi-threading (in framework *or* in driver) then the
 *	ndi_devi_enter() calls done by dependent threads must be
 *	'strictly-below'.
 *
 *	This is not callable from device autoconfiguration routines.
 *	They include, but are not limited to, _init(9e), _fini(9e), probe(9e),
 *	attach(9e), and detach(9e).
 */
void
ddi_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
{
	ASSERT(dip == NULL || ddi_get_parent(dip) == NULL ||
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	(void) walk_devs(dip, f, arg, 1);
}
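
/*
 * Usage sketch (illustrative only): counting attached nodes with a walker
 * callback. The callback and counter are assumptions, not framework code.
 *
 *	static int
 *	count_attached(dev_info_t *dip, void *arg)
 *	{
 *		if (i_ddi_devi_attached(dip))
 *			(*(int *)arg)++;
 *		return (DDI_WALK_CONTINUE);
 *	}
 *
 *	int n = 0;
 *	ddi_walk_devs(ddi_root_node(), count_attached, &n);
 */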
/*
 * This is a general-purpose routine that traverses the per-driver list
 * and calls the given function for each node. The function must return
 * one of the following values:
 *	DDI_WALK_CONTINUE
 *	DDI_WALK_TERMINATE
 *
 * N.B. The same restrictions from ddi_walk_devs() apply.
 */
void
e_ddi_walk_driver(char *drv, int (*f)(dev_info_t *, void *), void *arg)
{
	major_t major;
	struct devnames *dnp;
	dev_info_t *dip;

	major = ddi_name_to_major(drv);
	if (major == DDI_MAJOR_T_NONE)
		return;

	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = dnp->dn_head;
	while (dip) {
		ndi_hold_devi(dip);
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		if ((*f)(dip, arg) == DDI_WALK_TERMINATE) {
			ndi_rele_devi(dip);
			return;
		}
		LOCK_DEV_OPS(&dnp->dn_lock);
		ndi_rele_devi(dip);
		dip = ddi_get_next(dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);
}
/*
 * argument to i_find_devi, a devinfo node search callback function.
 */
struct match_info {
	dev_info_t	*dip;		/* result */
	char		*nodename;	/* if non-null, nodename must match */
	int		instance;	/* if != -1, instance must match */
	int		attached;	/* if != 0, i_ddi_devi_attached() */
};

static int
i_find_devi(dev_info_t *dip, void *arg)
{
	struct match_info *info = (struct match_info *)arg;

	if (((info->nodename == NULL) ||
	    (strcmp(ddi_node_name(dip), info->nodename) == 0)) &&
	    ((info->instance == -1) ||
	    (ddi_get_instance(dip) == info->instance)) &&
	    ((info->attached == 0) || i_ddi_devi_attached(dip))) {
		info->dip = dip;
		ndi_hold_devi(dip);
		return (DDI_WALK_TERMINATE);
	}

	return (DDI_WALK_CONTINUE);
}
/*
 * Find dip with a known node name and instance and return with it held
 */
dev_info_t *
ddi_find_devinfo(char *nodename, int instance, int attached)
{
	struct match_info	info;

	info.nodename = nodename;
	info.instance = instance;
	info.attached = attached;
	info.dip = NULL;

	ddi_walk_devs(ddi_root_node(), i_find_devi, &info);
	return (info.dip);
}
extern ib_boot_prop_t *iscsiboot_prop;
static void
i_ddi_parse_iscsi_name(char *name, char **nodename, char **addrname,
    char **minorname)
{
	char *cp, *colon;
	static char nulladdrname[] = "";

	/* default values */
	if (nodename)
		*nodename = name;
	if (addrname)
		*addrname = nulladdrname;
	if (minorname)
		*minorname = NULL;

	/*
	 * An iSCSI unit-address may itself contain ':' characters, so only
	 * the last ':' is treated as the minor-name separator; its position
	 * is remembered in 'colon' and the name is truncated there at the
	 * end of the scan.
	 */
	cp = colon = name;
	while (*cp != '\0') {
		if (addrname && *cp == '@') {
			*addrname = cp + 1;
			*cp = '\0';
		} else if (minorname && *cp == ':') {
			*minorname = cp + 1;
			colon = cp;
		}
		++cp;
	}
	if (colon != name) {
		*colon = '\0';
	}
}
/*
 * Parse for name, addr, and minor names. Some args may be NULL.
 */
void
i_ddi_parse_name(char *name, char **nodename, char **addrname, char **minorname)
{
	char *cp;
	static char nulladdrname[] = "";

	/* default values */
	if (nodename)
		*nodename = name;
	if (addrname)
		*addrname = nulladdrname;
	if (minorname)
		*minorname = NULL;

	cp = name;
	while (*cp != '\0') {
		if (addrname && *cp == '@') {
			*addrname = cp + 1;
			*cp = '\0';
		} else if (minorname && *cp == ':') {
			*minorname = cp + 1;
			*cp = '\0';
		}
		++cp;
	}
}
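
/*
 * Example (illustrative): parsing the component "sd@1,0:a" in place yields
 * nodename "sd", addrname "1,0" and minorname "a"; the '@' and ':' bytes in
 * the input are overwritten with NUL, so callers must pass a writable copy.
 */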
static char *
child_path_to_driver(dev_info_t *parent, char *child_name, char *unit_address)
{
	char *p, *drvname = NULL;
	major_t maj;

	/*
	 * Construct the pathname and ask the implementation
	 * if it can do a driver = f(pathname) for us, if not
	 * we'll just default to using the node-name that
	 * was given to us.  We want to do this first to
	 * allow the platform to use 'generic' names for
	 * legacy device drivers.
	 */
	p = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(parent, p);
	(void) strcat(p, "/");
	(void) strcat(p, child_name);
	if (unit_address && *unit_address) {
		(void) strcat(p, "@");
		(void) strcat(p, unit_address);
	}

	/*
	 * Get the binding. If there is none, return the child_name
	 * and let the caller deal with it.
	 */
	maj = path_to_major(p);

	kmem_free(p, MAXPATHLEN);

	if (maj != DDI_MAJOR_T_NONE)
		drvname = ddi_major_to_name(maj);
	if (drvname == NULL)
		drvname = child_name;

	return (drvname);
}
#define	PCI_EX_CLASS	"pciexclass"
#define	PCI_EX		"pciex"
#define	PCI_CLASS	"pciclass"
#define	PCI		"pci"

int
ddi_is_pci_dip(dev_info_t *dip)
{
	char	*prop = NULL;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "compatible", &prop) == DDI_PROP_SUCCESS) {
		ASSERT(prop);
		if (strncmp(prop, PCI_EX_CLASS, sizeof (PCI_EX_CLASS) - 1)
		    == 0 ||
		    strncmp(prop, PCI_EX, sizeof (PCI_EX)- 1)
		    == 0 ||
		    strncmp(prop, PCI_CLASS, sizeof (PCI_CLASS) - 1)
		    == 0 ||
		    strncmp(prop, PCI, sizeof (PCI) - 1)
		    == 0) {
			ddi_prop_free(prop);
			return (1);
		}
	}

	if (prop != NULL) {
		ddi_prop_free(prop);
	}

	return (0);
}
/*
 * Given the pathname of a device, fill in the dev_info_t value and/or the
 * dev_t value and/or the spectype, depending on which parameters are non-NULL.
 * If there is an error, this function returns -1.
 *
 * NOTE: If this function returns the dev_info_t structure, then it
 * does so with a hold on the devi. Caller should ensure that they get
 * decremented via ddi_release_devi() or ndi_rele_devi();
 *
 * This function can be invoked in the boot case for a pathname without
 * device argument (:xxxx), traditionally treated as a minor name.
 * In this case, we do the following
 * (1) search the minor node of type DDM_DEFAULT.
 * (2) if no DDM_DEFAULT minor exists, then the first non-alias minor is chosen.
 * (3) if neither exists, a dev_t is faked with minor number = instance.
 * As of S9 FCS, no instance of #1 exists. #2 is used by several platforms
 * to default the boot partition to :a possibly by other OBP definitions.
 * #3 is used for booting off network interfaces, most SPARC network
 * drivers support Style-2 only, so only DDM_ALIAS minor exists.
 *
 * It is possible for OBP to present device args at the end of the path as
 * well as in the middle. For example, with IB the following strings are
 * valid boot paths.
 *	a /pci@8,700000/ib@1,2:port=1,pkey=ff,dhcp,...
 *	b /pci@8,700000/ib@1,1:port=1/ioc@xxxxxx,yyyyyyy:dhcp
 * Case (a), we first look for minor node "port=1,pkey...".
 * Failing that, we will pass "port=1,pkey..." to the bus_config
 * entry point of ib (HCA) driver.
 * Case (b), configure ib@1,1 as usual. Then invoke ib's bus_config
 * with argument "ioc@xxxxxxx,yyyyyyy:port=1". After configuring
 * the ioc, look for minor node dhcp. If not found, pass ":dhcp"
 * to ioc's bus_config entry point.
 */
static int
resolve_pathname(char *pathname,
    dev_info_t **dipp, dev_t *devtp, int *spectypep)
{
	int			error;
	dev_info_t		*parent, *child;
	struct pathname		pn;
	char			*component, *config_name;
	char			*minorname = NULL;
	char			*prev_minor = NULL;
	dev_t			devt = NODEV;
	int			spectype;
	struct ddi_minor_data	*dmn;
	int			circ;

	if (*pathname != '/')
		return (EINVAL);
	parent = ddi_root_node();	/* Begin at the top of the tree */

	if (error = pn_get(pathname, UIO_SYSSPACE, &pn))
		return (error);

	ASSERT(i_ddi_devi_attached(parent));
	ndi_hold_devi(parent);

	component = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	config_name = kmem_alloc(MAXNAMELEN, KM_SLEEP);

	pn_skipslash(&pn);
	while (pn_pathleft(&pn)) {
		/* remember prev minor (:xxx) in the middle of path */
		if (minorname)
			prev_minor = i_ddi_strdup(minorname, KM_SLEEP);

		/* Get component and chop off minorname */
		(void) pn_getcomponent(&pn, component);
		if ((iscsiboot_prop != NULL) &&
		    (strcmp((DEVI(parent)->devi_node_name), "iscsi") == 0)) {
			i_ddi_parse_iscsi_name(component, NULL, NULL,
			    &minorname);
		} else {
			i_ddi_parse_name(component, NULL, NULL, &minorname);
		}
		if (prev_minor == NULL) {
			(void) snprintf(config_name, MAXNAMELEN, "%s",
			    component);
		} else {
			(void) snprintf(config_name, MAXNAMELEN, "%s:%s",
			    component, prev_minor);
			kmem_free(prev_minor, strlen(prev_minor) + 1);
			prev_minor = NULL;
		}

		/*
		 * Find and configure the child
		 */
		if (ndi_devi_config_one(parent, config_name, &child,
		    NDI_PROMNAME | NDI_NO_EVENT) != NDI_SUCCESS) {
			ndi_rele_devi(parent);
			pn_free(&pn);
			kmem_free(component, MAXNAMELEN);
			kmem_free(config_name, MAXNAMELEN);
			return (-1);
		}

		ASSERT(i_ddi_devi_attached(child));
		ndi_rele_devi(parent);
		parent = child;
		pn_skipslash(&pn);
	}

	/*
	 * First look for a minor node matching minorname.
	 * Failing that, try to pass minorname to bus_config().
	 */
	if (minorname && i_ddi_minorname_to_devtspectype(parent,
	    minorname, &devt, &spectype) == DDI_FAILURE) {
		(void) snprintf(config_name, MAXNAMELEN, "%s", minorname);
		if (ndi_devi_config_obp_args(parent,
		    config_name, &child, 0) != NDI_SUCCESS) {
			ndi_rele_devi(parent);
			pn_free(&pn);
			kmem_free(component, MAXNAMELEN);
			kmem_free(config_name, MAXNAMELEN);
			NDI_CONFIG_DEBUG((CE_NOTE,
			    "%s: minor node not found\n", pathname));
			return (-1);
		}
		minorname = NULL;	/* look for default minor */
		ASSERT(i_ddi_devi_attached(child));
		ndi_rele_devi(parent);
		parent = child;
	}

	if (devtp || spectypep) {
		if (minorname == NULL) {
			/*
			 * Search for a default entry with an active
			 * ndi_devi_enter to protect the devi_minor list.
			 */
			ndi_devi_enter(parent, &circ);
			for (dmn = DEVI(parent)->devi_minor; dmn;
			    dmn = dmn->next) {
				if (dmn->type == DDM_DEFAULT) {
					devt = dmn->ddm_dev;
					spectype = dmn->ddm_spec_type;
					break;
				}
			}

			if (devt == NODEV) {
				/*
				 * No default minor node, try the first one;
				 * else, assume 1-1 instance-minor mapping
				 */
				dmn = DEVI(parent)->devi_minor;
				if (dmn && ((dmn->type == DDM_MINOR) ||
				    (dmn->type == DDM_INTERNAL_PATH))) {
					devt = dmn->ddm_dev;
					spectype = dmn->ddm_spec_type;
				} else {
					devt = makedevice(
					    DEVI(parent)->devi_major,
					    ddi_get_instance(parent));
					spectype = S_IFCHR;
				}
			}
			ndi_devi_exit(parent, circ);
		}
		if (devtp)
			*devtp = devt;
		if (spectypep)
			*spectypep = spectype;
	}

	pn_free(&pn);
	kmem_free(component, MAXNAMELEN);
	kmem_free(config_name, MAXNAMELEN);

	/*
	 * If there is no error, return the appropriate parameters
	 */
	if (dipp != NULL)
		*dipp = parent;
	else {
		/*
		 * We should really keep the ref count to keep the node from
		 * detaching but ddi_pathname_to_dev_t() specifies a NULL dipp,
		 * so we have no way of passing back the held dip.  Not holding
		 * the dip allows detaches to occur - which can cause problems
		 * for subsystems which call ddi_pathname_to_dev_t (console).
		 *
		 * Instead of holding the dip, we place a ddi-no-autodetach
		 * property on the node to prevent auto detaching.
		 *
		 * The right fix is to remove ddi_pathname_to_dev_t and replace
		 * it, and all references, with a call that specifies a dipp.
		 * In addition, the callers of this new interface would then
		 * need to call ndi_rele_devi when the reference is complete.
		 */
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, parent,
		    DDI_NO_AUTODETACH, 1);
		ndi_rele_devi(parent);
	}

	return (0);
}
/*
 * Given the pathname of a device, return the dev_t of the corresponding
 * device.  Returns NODEV on failure.
 *
 * Note that this call sets the DDI_NO_AUTODETACH property on the devinfo node.
 */
dev_t
ddi_pathname_to_dev_t(char *pathname)
{
	dev_t devt;
	int error;

	error = resolve_pathname(pathname, NULL, &devt, NULL);

	return (error ? NODEV : devt);
}
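
/*
 * Usage sketch (illustrative only): resolving a device path to a dev_t at
 * boot time; the path shown is an assumption used for the example.
 *
 *	dev_t dev;
 *
 *	dev = ddi_pathname_to_dev_t("/pci@0,0/pci8086,2829@1f,2/disk@0,0:a");
 *	if (dev == NODEV)
 *		cmn_err(CE_WARN, "cannot resolve boot device path");
 */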
/*
 * Translate a prom pathname to kernel devfs pathname.
 * Caller is assumed to allocate devfspath memory of
 * size at least MAXPATHLEN
 *
 * The prom pathname may not include minor name, but
 * devfs pathname has a minor name portion.
 */
int
i_ddi_prompath_to_devfspath(char *prompath, char *devfspath)
{
	dev_t		devt = (dev_t)NODEV;
	dev_info_t	*dip = NULL;
	char		*minor_name = NULL;
	int		spectype;
	int		error;
	int		circ;

	error = resolve_pathname(prompath, &dip, &devt, &spectype);
	if (error)
		return (DDI_FAILURE);
	ASSERT(dip && devt != NODEV);

	/*
	 * Get in-kernel devfs pathname
	 */
	(void) ddi_pathname(dip, devfspath);

	ndi_devi_enter(dip, &circ);
	minor_name = i_ddi_devtspectype_to_minorname(dip, devt, spectype);
	if (minor_name) {
		(void) strcat(devfspath, ":");
		(void) strcat(devfspath, minor_name);
	} else {
		/*
		 * If minor_name is NULL, we have an alias minor node.
		 * So manufacture a path to the corresponding clone minor.
		 */
		(void) snprintf(devfspath, MAXPATHLEN, "%s:%s",
		    CLONE_PATH, ddi_driver_name(dip));
	}
	ndi_devi_exit(dip, circ);

	/* release hold from resolve_pathname() */
	ndi_rele_devi(dip);
	return (0);
}
4058 * This function is intended to identify drivers that must quiesce for fast
4059 * reboot to succeed. It does not claim to have more knowledge about the device
4060 * than its driver. If a driver has implemented quiesce(), it will be invoked;
4061 * if a so identified driver does not manage any device that needs to be
4062 * quiesced, it must explicitly set its devo_quiesce dev_op to
4063 * ddi_quiesce_not_needed.
4065 static int skip_pseudo
= 1; /* Skip pseudo devices */
4066 static int skip_non_hw
= 1; /* Skip devices with no hardware property */
4068 should_implement_quiesce(dev_info_t
*dip
)
4070 struct dev_info
*devi
= DEVI(dip
);
4074 * If dip is pseudo and skip_pseudo is set, driver doesn't have to
4075 * implement quiesce().
4078 strncmp(ddi_binding_name(dip
), "pseudo", sizeof ("pseudo")) == 0)
4082 * If parent dip is pseudo and skip_pseudo is set, driver doesn't have
4083 * to implement quiesce().
4085 if (skip_pseudo
&& (pdip
= ddi_get_parent(dip
)) != NULL
&&
4086 strncmp(ddi_binding_name(pdip
), "pseudo", sizeof ("pseudo")) == 0)
4090 * If not attached, driver doesn't have to implement quiesce().
4092 if (!i_ddi_devi_attached(dip
))
4096 * If dip has no hardware property and skip_non_hw is set,
4097 * driver doesn't have to implement quiesce().
4099 if (skip_non_hw
&& devi
->devi_hw_prop_ptr
== NULL
)
4106 driver_has_quiesce(struct dev_ops
*ops
)
4108 if ((ops
->devo_rev
>= 4) && (ops
->devo_quiesce
!= nodev
) &&
4109 (ops
->devo_quiesce
!= NULL
) && (ops
->devo_quiesce
!= nulldev
) &&
4110 (ops
->devo_quiesce
!= ddi_quiesce_not_supported
))
4117 * Check to see if a driver has implemented the quiesce() DDI function.
4120 check_driver_quiesce(dev_info_t
*dip
, void *arg
)
4122 struct dev_ops
*ops
;
4124 if (!should_implement_quiesce(dip
))
4125 return (DDI_WALK_CONTINUE
);
4127 if ((ops
= ddi_get_driver(dip
)) == NULL
)
4128 return (DDI_WALK_CONTINUE
);
4130 if (driver_has_quiesce(ops
)) {
4131 if ((quiesce_debug
& 0x2) == 0x2) {
4132 if (ops
->devo_quiesce
== ddi_quiesce_not_needed
)
4133 cmn_err(CE_CONT
, "%s does not need to be "
4134 "quiesced", ddi_driver_name(dip
));
4136 cmn_err(CE_CONT
, "%s has quiesce routine",
4137 ddi_driver_name(dip
));
4142 cmn_err(CE_WARN
, "%s has no quiesce()", ddi_driver_name(dip
));
4145 return (DDI_WALK_CONTINUE
);
4152 quiesce_one_device(dev_info_t
*dip
, void *arg
)
4154 struct dev_ops
*ops
;
4155 int should_quiesce
= 0;
4158 * If the device is not attached it doesn't need to be quiesced.
4160 if (!i_ddi_devi_attached(dip
))
4163 if ((ops
= ddi_get_driver(dip
)) == NULL
)
4166 should_quiesce
= should_implement_quiesce(dip
);
4169 * If there's an implementation of quiesce(), always call it even if
4170 * some of the drivers don't have quiesce() or quiesce() have failed
4171 * so we can do force fast reboot. The implementation of quiesce()
4172 * should not negatively affect a regular reboot.
4174 if (driver_has_quiesce(ops
)) {
4175 int rc
= DDI_SUCCESS
;
4177 if (ops
->devo_quiesce
== ddi_quiesce_not_needed
)
4180 rc
= devi_quiesce(dip
);
4182 if (rc
!= DDI_SUCCESS
&& should_quiesce
) {
4184 cmn_err(CE_WARN
, "quiesce() failed for %s%d",
4185 ddi_driver_name(dip
), ddi_get_instance(dip
));
4190 } else if (should_quiesce
&& arg
!= NULL
) {
4196 * Traverse the dev info tree in a breadth-first manner so that we quiesce
4197 * children first. All subtrees under the parent of dip will be quiesced.
4200 quiesce_devices(dev_info_t
*dip
, void *arg
)
4203 * if we're reached here, the device tree better not be changing.
4204 * so either devinfo_freeze better be set or we better be panicing.
4206 ASSERT(devinfo_freeze
|| panicstr
);
4208 for (; dip
!= NULL
; dip
= ddi_get_next_sibling(dip
)) {
4209 quiesce_devices(ddi_get_child(dip
), arg
);
4211 quiesce_one_device(dip
, arg
);
4216 * Reset all the pure leaf drivers on the system at halt time
4219 reset_leaf_device(dev_info_t
*dip
, void *arg
)
4221 _NOTE(ARGUNUSED(arg
))
4222 struct dev_ops
*ops
;
4224 /* if the device doesn't need to be reset then there's nothing to do */
4225 if (!DEVI_NEED_RESET(dip
))
4226 return (DDI_WALK_CONTINUE
);
4229 * if the device isn't a char/block device or doesn't have a
4230 * reset entry point then there's nothing to do.
4232 ops
= ddi_get_driver(dip
);
4233 if ((ops
== NULL
) || (ops
->devo_cb_ops
== NULL
) ||
4234 (ops
->devo_reset
== nodev
) || (ops
->devo_reset
== nulldev
) ||
4235 (ops
->devo_reset
== NULL
))
4236 return (DDI_WALK_CONTINUE
);
4238 if (DEVI_IS_ATTACHING(dip
) || DEVI_IS_DETACHING(dip
)) {
4239 static char path
[MAXPATHLEN
];
4242 * bad news, this device has blocked in it's attach or
4243 * detach routine, which means it not safe to call it's
4244 * devo_reset() entry point.
4246 cmn_err(CE_WARN
, "unable to reset device: %s",
4247 ddi_pathname(dip
, path
));
4248 return (DDI_WALK_CONTINUE
);
4251 NDI_CONFIG_DEBUG((CE_NOTE
, "resetting %s%d\n",
4252 ddi_driver_name(dip
), ddi_get_instance(dip
)));
4254 (void) devi_reset(dip
, DDI_RESET_FORCE
);
4255 return (DDI_WALK_CONTINUE
);
4262 * if we're reached here, the device tree better not be changing.
4263 * so either devinfo_freeze better be set or we better be panicing.
4265 ASSERT(devinfo_freeze
|| panicstr
);
4267 (void) walk_devs(top_devinfo
, reset_leaf_device
, NULL
, 0);
4272 * devtree_freeze() must be called before quiesce_devices() and reset_leaves()
4273 * during a normal system shutdown. It attempts to ensure that there are no
4274 * outstanding attach or detach operations in progress when quiesce_devices() or
4275 * reset_leaves()is invoked. It must be called before the system becomes
4276 * single-threaded because device attach and detach are multi-threaded
4277 * operations. (note that during system shutdown the system doesn't actually
4278 * become single-thread since other threads still exist, but the shutdown thread
4279 * will disable preemption for itself, raise it's pil, and stop all the other
4280 * cpus in the system there by effectively making the system single-threaded.)
4283 devtree_freeze(void)
4287 /* if we're panicing then the device tree isn't going to be changing */
4291 /* stop all dev_info state changes in the device tree */
4292 devinfo_freeze
= gethrtime();
4295 * if we're not panicing and there are on-going attach or detach
4296 * operations, wait for up to 3 seconds for them to finish. This
4297 * is a randomly chosen interval but this should be ok because:
4298 * - 3 seconds is very small relative to the deadman timer.
4299 * - normal attach and detach operations should be very quick.
4300 * - attach and detach operations are fairly rare.
4302 while (!panicstr
&& atomic_add_long_nv(&devinfo_attach_detach
, 0) &&
4306 /* do a sleeping wait for one second */
4307 ASSERT(!servicing_interrupt());
4313 bind_dip(dev_info_t
*dip
, void *arg
)
4315 _NOTE(ARGUNUSED(arg
))
4317 major_t major
, pmajor
;
4320 * If the node is currently bound to the wrong driver, try to unbind
4321 * so that we can rebind to the correct driver.
4323 if (i_ddi_node_state(dip
) >= DS_BOUND
) {
4324 major
= ddi_compatible_driver_major(dip
, NULL
);
4325 if ((DEVI(dip
)->devi_major
== major
) &&
4326 (i_ddi_node_state(dip
) >= DS_INITIALIZED
)) {
4328 * Check for a path-oriented driver alias that
4329 * takes precedence over current driver binding.
4331 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
4332 (void) ddi_pathname(dip
, path
);
4333 pmajor
= ddi_name_to_major(path
);
4334 if (driver_active(pmajor
))
4336 kmem_free(path
, MAXPATHLEN
);
4339 /* attempt unbind if current driver is incorrect */
4340 if (driver_active(major
) &&
4341 (major
!= DEVI(dip
)->devi_major
))
4342 (void) ndi_devi_unbind_driver(dip
);
4345 /* If unbound, try to bind to a driver */
4346 if (i_ddi_node_state(dip
) < DS_BOUND
)
4347 (void) ndi_devi_bind_driver(dip
, 0);
4349 return (DDI_WALK_CONTINUE
);
4353 i_ddi_bind_devs(void)
4355 /* flush devfs so that ndi_devi_unbind_driver will work when possible */
4356 (void) devfs_clean(top_devinfo
, NULL
, 0);
4358 ddi_walk_devs(top_devinfo
, bind_dip
, NULL
);
4361 /* callback data for unbind_children_by_alias() */
4362 typedef struct unbind_data
{
4370 * A utility function provided for testing and support convenience
4371 * Called for each device during an upgrade_drv -d bound to the alias
4372 * that cannot be unbound due to device in use.
4375 unbind_alias_dev_in_use(dev_info_t
*dip
, char *alias
)
4377 if (moddebug
& MODDEBUG_BINDING
) {
4378 cmn_err(CE_CONT
, "%s%d: state %d: bound to %s\n",
4379 ddi_driver_name(dip
), ddi_get_instance(dip
),
4380 i_ddi_node_state(dip
), alias
);
4385 * walkdevs callback for unbind devices bound to specific driver
4386 * and alias. Invoked within the context of update_drv -d <alias>.
4389 unbind_children_by_alias(dev_info_t
*dip
, void *arg
)
4394 unbind_data_t
*ub
= (unbind_data_t
*)(uintptr_t)arg
;
4398 * We are called from update_drv to try to unbind a specific
4399 * set of aliases for a driver. Unbind what persistent nodes
4400 * we can, and return the number of nodes which cannot be unbound.
4401 * If not all nodes can be unbound, update_drv leaves the
4402 * state of the driver binding files unchanged, except in
4405 ndi_devi_enter(dip
, &circ
);
4406 for (cdip
= ddi_get_child(dip
); cdip
; cdip
= next
) {
4407 next
= ddi_get_next_sibling(cdip
);
4408 if ((ddi_driver_major(cdip
) != ub
->drv_major
) ||
4409 (strcmp(DEVI(cdip
)->devi_node_name
, ub
->drv_alias
) != 0))
4411 if (i_ddi_node_state(cdip
) >= DS_BOUND
) {
4412 rv
= ndi_devi_unbind_driver(cdip
);
4413 if (rv
!= DDI_SUCCESS
||
4414 (i_ddi_node_state(cdip
) >= DS_BOUND
)) {
4415 unbind_alias_dev_in_use(cdip
, ub
->drv_alias
);
4419 if (ndi_dev_is_persistent_node(cdip
) == 0)
4420 (void) ddi_remove_child(cdip
, 0);
4423 ndi_devi_exit(dip
, circ
);
4425 return (DDI_WALK_CONTINUE
);
4429 * Unbind devices by driver & alias
4430 * Context: update_drv [-f] -d -i <alias> <driver>
4433 i_ddi_unbind_devs_by_alias(major_t major
, char *alias
)
4438 ub
= kmem_zalloc(sizeof (*ub
), KM_SLEEP
);
4439 ub
->drv_major
= major
;
4440 ub
->drv_alias
= alias
;
4441 ub
->ndevs_bound
= 0;
4442 ub
->unbind_errors
= 0;
4444 /* flush devfs so that ndi_devi_unbind_driver will work when possible */
4445 (void) devfs_clean(top_devinfo
, NULL
, 0);
4446 ddi_walk_devs(top_devinfo
, unbind_children_by_alias
,
4447 (void *)(uintptr_t)ub
);
4449 /* return the number of devices remaining bound to the alias */
4450 rv
= ub
->ndevs_bound
+ ub
->unbind_errors
;
4451 kmem_free(ub
, sizeof (*ub
));
/*
 * walkdevs callback to unbind devices bound to the given driver
 */
static int
unbind_children_by_driver(dev_info_t *dip, void *arg)
{
	int		circ;
	dev_info_t	*cdip;
	dev_info_t	*next;
	major_t		major = (major_t)(uintptr_t)arg;
	int		rv;

	/*
	 * We are called either from rem_drv or update_drv when reloading
	 * a driver.conf file.  In either case, we unbind persistent nodes
	 * and destroy .conf nodes.  In the case of rem_drv, this will be
	 * the final state.  In the case of update_drv, i_ddi_bind_devs()
	 * may be invoked later to re-enumerate (new) driver.conf rebind
	 * persistent nodes.
	 */
	ndi_devi_enter(dip, &circ);
	for (cdip = ddi_get_child(dip); cdip; cdip = next) {
		next = ddi_get_next_sibling(cdip);
		if (ddi_driver_major(cdip) != major)
			continue;
		if (i_ddi_node_state(cdip) >= DS_BOUND) {
			rv = ndi_devi_unbind_driver(cdip);
			if (rv == DDI_FAILURE ||
			    (i_ddi_node_state(cdip) >= DS_BOUND))
				continue;
			if (ndi_dev_is_persistent_node(cdip) == 0)
				(void) ddi_remove_child(cdip, 0);
		}
	}
	ndi_devi_exit(dip, circ);

	return (DDI_WALK_CONTINUE);
}

/*
 * Unbind devices by driver
 * Context: rem_drv or unload driver.conf
 */
void
i_ddi_unbind_devs(major_t major)
{
	/* flush devfs so that ndi_devi_unbind_driver will work when possible */
	(void) devfs_clean(top_devinfo, NULL, 0);
	ddi_walk_devs(top_devinfo, unbind_children_by_driver,
	    (void *)(uintptr_t)major);
}
/*
 * I/O Hotplug control
 */

/*
 * create and attach a dev_info node from a .conf file spec
 */
static void
init_spec_child(dev_info_t *pdip, struct hwc_spec *specp, uint_t flags)
{
	_NOTE(ARGUNUSED(flags))
	dev_info_t *dip;
	char *node_name;

	if (((node_name = specp->hwc_devi_name) == NULL) ||
	    (ddi_name_to_major(node_name) == DDI_MAJOR_T_NONE)) {
		char *tmp = node_name;
		if (tmp == NULL)
			tmp = "<none>";
		cmn_err(CE_CONT,
		    "init_spec_child: parent=%s, bad spec (%s)\n",
		    ddi_node_name(pdip), tmp);
		return;
	}

	dip = i_ddi_alloc_node(pdip, node_name, (pnode_t)DEVI_PSEUDO_NODEID,
	    -1, specp->hwc_devi_sys_prop_ptr, KM_SLEEP);

	if (dip == NULL)
		return;

	if (ddi_initchild(pdip, dip) != DDI_SUCCESS)
		(void) ddi_remove_child(dip, 0);
}
/*
 * Lookup hwc specs from hash tables and make children from the spec
 * Because some .conf children are "merge" nodes, we also initialize
 * .conf children to merge properties onto hardware nodes.
 *
 * The pdip must be held busy.
 */
int
i_ndi_make_spec_children(dev_info_t *pdip, uint_t flags)
{
	extern struct hwc_spec *hwc_get_child_spec(dev_info_t *, major_t);
	int			circ;
	struct hwc_spec		*list, *spec;

	ndi_devi_enter(pdip, &circ);
	if (DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN) {
		ndi_devi_exit(pdip, circ);
		return (DDI_SUCCESS);
	}

	list = hwc_get_child_spec(pdip, DDI_MAJOR_T_NONE);
	for (spec = list; spec != NULL; spec = spec->hwc_next) {
		init_spec_child(pdip, spec, flags);
	}
	hwc_free_spec_list(list);

	mutex_enter(&DEVI(pdip)->devi_lock);
	DEVI(pdip)->devi_flags |= DEVI_MADE_CHILDREN;
	mutex_exit(&DEVI(pdip)->devi_lock);
	ndi_devi_exit(pdip, circ);
	return (DDI_SUCCESS);
}

/*
 * Run initchild on all child nodes so that instance assignment
 * for multiport network cards is contiguous.
 *
 * The pdip must be held busy.
 */
void
i_ndi_init_hw_children(dev_info_t *pdip, uint_t flags)
{
	dev_info_t *dip;

	ASSERT(DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN);

	/* contiguous instance assignment */
	e_ddi_enter_instance();
	dip = ddi_get_child(pdip);
	while (dip) {
		if (ndi_dev_is_persistent_node(dip))
			(void) i_ndi_config_node(dip, DS_INITIALIZED, flags);
		dip = ddi_get_next_sibling(dip);
	}
	e_ddi_exit_instance();
}
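
/*
 * Note: the e_ddi_enter_instance()/e_ddi_exit_instance() bracket above is
 * what makes instance assignment for all children of this nexus happen in
 * a single pass, so multiport devices (for example a multi-port NIC) end
 * up with contiguous instance numbers instead of interleaving with
 * unrelated attaches.
 */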
/*
 * report device status
 */
static void
i_ndi_devi_report_status_change(dev_info_t *dip, char *path)
{
	char *status;

	if (!DEVI_NEED_REPORT(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED) ||
	    ndi_dev_is_hidden_node(dip)) {
		return;
	}

	/* Invalidate the devinfo snapshot cache */
	i_ddi_di_cache_invalidate();

	if (DEVI_IS_DEVICE_REMOVED(dip)) {
		status = "removed";
	} else if (DEVI_IS_DEVICE_OFFLINE(dip)) {
		status = "offline";
	} else if (DEVI_IS_DEVICE_DOWN(dip)) {
		status = "down";
	} else if (DEVI_IS_BUS_QUIESCED(dip)) {
		status = "quiesced";
	} else if (DEVI_IS_BUS_DOWN(dip)) {
		status = "down";
	} else if (i_ddi_devi_attached(dip)) {
		status = "online";
	} else {
		status = "unknown";
	}

	if (path == NULL) {
		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		cmn_err(CE_CONT, "?%s (%s%d) %s\n",
		    ddi_pathname(dip, path), ddi_driver_name(dip),
		    ddi_get_instance(dip), status);
		kmem_free(path, MAXPATHLEN);
	} else {
		cmn_err(CE_CONT, "?%s (%s%d) %s\n",
		    path, ddi_driver_name(dip),
		    ddi_get_instance(dip), status);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_REPORT_DONE(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));
}
4650 * log a notification that a dev_info node has been configured.
4653 i_log_devfs_add_devinfo(dev_info_t
*dip
, uint_t flags
)
4659 sysevent_value_t se_val
;
4660 sysevent_attr_list_t
*ev_attr_list
= NULL
;
4662 int no_transport
= 0;
4664 ASSERT(dip
&& ddi_get_parent(dip
) &&
4665 DEVI_BUSY_OWNED(ddi_get_parent(dip
)));
4667 /* do not generate ESC_DEVFS_DEVI_ADD event during boot */
4668 if (!i_ddi_io_initialized())
4669 return (DDI_SUCCESS
);
4671 /* Invalidate the devinfo snapshot cache */
4672 i_ddi_di_cache_invalidate();
4674 ev
= sysevent_alloc(EC_DEVFS
, ESC_DEVFS_DEVI_ADD
, EP_DDI
, SE_SLEEP
);
4676 pathname
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
4678 (void) ddi_pathname(dip
, pathname
);
4679 ASSERT(strlen(pathname
));
4681 se_val
.value_type
= SE_DATA_TYPE_STRING
;
4682 se_val
.value
.sv_string
= pathname
;
4683 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
4684 &se_val
, SE_SLEEP
) != 0) {
4688 /* add the device class attribute */
4689 if ((class_name
= i_ddi_devi_class(dip
)) != NULL
) {
4690 se_val
.value_type
= SE_DATA_TYPE_STRING
;
4691 se_val
.value
.sv_string
= class_name
;
4693 if (sysevent_add_attr(&ev_attr_list
,
4694 DEVFS_DEVI_CLASS
, &se_val
, SE_SLEEP
) != 0) {
4695 sysevent_free_attr(ev_attr_list
);
4701 * must log a branch event too unless NDI_BRANCH_EVENT_OP is set,
4702 * in which case the branch event will be logged by the caller
4703 * after the entire branch has been configured.
4705 if ((flags
& NDI_BRANCH_EVENT_OP
) == 0) {
4707 * Instead of logging a separate branch event just add
4708 * DEVFS_BRANCH_EVENT attribute. It indicates devfsadmd to
4709 * generate a EC_DEV_BRANCH event.
4711 se_val
.value_type
= SE_DATA_TYPE_INT32
;
4712 se_val
.value
.sv_int32
= 1;
4713 if (sysevent_add_attr(&ev_attr_list
,
4714 DEVFS_BRANCH_EVENT
, &se_val
, SE_SLEEP
) != 0) {
4715 sysevent_free_attr(ev_attr_list
);
4720 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
4721 sysevent_free_attr(ev_attr_list
);
4725 if ((se_err
= log_sysevent(ev
, SE_SLEEP
, &eid
)) != 0) {
4726 if (se_err
== SE_NO_TRANSPORT
)
4732 kmem_free(pathname
, MAXPATHLEN
);
4734 return (DDI_SUCCESS
);
4737 cmn_err(CE_WARN
, "failed to log ESC_DEVFS_DEVI_ADD event for %s%s",
4738 pathname
, (no_transport
) ? " (syseventd not responding)" : "");
4740 cmn_err(CE_WARN
, "/dev may not be current for driver %s. "
4741 "Run devfsadm -i %s",
4742 ddi_driver_name(dip
), ddi_driver_name(dip
));
4745 kmem_free(pathname
, MAXPATHLEN
);
4746 return (DDI_SUCCESS
);
4750 * log a notification that a dev_info node has been unconfigured.
4753 i_log_devfs_remove_devinfo(char *pathname
, char *class_name
, char *driver_name
,
4754 int instance
, uint_t flags
)
4758 sysevent_value_t se_val
;
4759 sysevent_attr_list_t
*ev_attr_list
= NULL
;
4761 int no_transport
= 0;
4763 if (!i_ddi_io_initialized())
4764 return (DDI_SUCCESS
);
4766 /* Invalidate the devinfo snapshot cache */
4767 i_ddi_di_cache_invalidate();
4769 ev
= sysevent_alloc(EC_DEVFS
, ESC_DEVFS_DEVI_REMOVE
, EP_DDI
, SE_SLEEP
);
4771 se_val
.value_type
= SE_DATA_TYPE_STRING
;
4772 se_val
.value
.sv_string
= pathname
;
4773 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
4774 &se_val
, SE_SLEEP
) != 0) {
4779 /* add the device class, driver name and instance attributes */
4781 se_val
.value_type
= SE_DATA_TYPE_STRING
;
4782 se_val
.value
.sv_string
= class_name
;
4783 if (sysevent_add_attr(&ev_attr_list
,
4784 DEVFS_DEVI_CLASS
, &se_val
, SE_SLEEP
) != 0) {
4785 sysevent_free_attr(ev_attr_list
);
4789 se_val
.value_type
= SE_DATA_TYPE_STRING
;
4790 se_val
.value
.sv_string
= driver_name
;
4791 if (sysevent_add_attr(&ev_attr_list
,
4792 DEVFS_DRIVER_NAME
, &se_val
, SE_SLEEP
) != 0) {
4793 sysevent_free_attr(ev_attr_list
);
4797 se_val
.value_type
= SE_DATA_TYPE_INT32
;
4798 se_val
.value
.sv_int32
= instance
;
4799 if (sysevent_add_attr(&ev_attr_list
,
4800 DEVFS_INSTANCE
, &se_val
, SE_SLEEP
) != 0) {
4801 sysevent_free_attr(ev_attr_list
);
4807 * must log a branch event too unless NDI_BRANCH_EVENT_OP is set,
4808 * in which case the branch event will be logged by the caller
4809 * after the entire branch has been unconfigured.
4811 if ((flags
& NDI_BRANCH_EVENT_OP
) == 0) {
4813 * Instead of logging a separate branch event just add
4814 * DEVFS_BRANCH_EVENT attribute. It indicates devfsadmd to
4815 * generate a EC_DEV_BRANCH event.
4817 se_val
.value_type
= SE_DATA_TYPE_INT32
;
4818 se_val
.value
.sv_int32
= 1;
4819 if (sysevent_add_attr(&ev_attr_list
,
4820 DEVFS_BRANCH_EVENT
, &se_val
, SE_SLEEP
) != 0) {
4821 sysevent_free_attr(ev_attr_list
);
4826 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
4827 sysevent_free_attr(ev_attr_list
);
4831 if ((se_err
= log_sysevent(ev
, SE_SLEEP
, &eid
)) != 0) {
4832 if (se_err
== SE_NO_TRANSPORT
)
4838 return (DDI_SUCCESS
);
4842 cmn_err(CE_WARN
, "failed to log ESC_DEVFS_DEVI_REMOVE event for %s%s",
4843 pathname
, (no_transport
) ? " (syseventd not responding)" : "");
4844 return (DDI_SUCCESS
);
void
i_ddi_log_devfs_device_remove(dev_info_t *dip)
{
	char	*path;

	ASSERT(dip && ddi_get_parent(dip) &&
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));
	ASSERT(DEVI_IS_DEVICE_REMOVED(dip));

	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
	if (i_ddi_node_state(dip) < DS_INITIALIZED)
		return;

	/* Inform LDI_EV_DEVICE_REMOVE callbacks. */
	ldi_invoke_finalize(dip, DDI_DEV_T_ANY, 0, LDI_EV_DEVICE_REMOVE,
	    LDI_EV_SUCCESS, NULL);

	/* Generate EC_DEVFS_DEVI_REMOVE sysevent. */
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) i_log_devfs_remove_devinfo(ddi_pathname(dip, path),
	    i_ddi_devi_class(dip), (char *)ddi_driver_name(dip),
	    ddi_get_instance(dip), 0);
	kmem_free(path, MAXPATHLEN);
}

void
i_ddi_log_devfs_device_insert(dev_info_t *dip)
{
	ASSERT(dip && ddi_get_parent(dip) &&
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));
	ASSERT(!DEVI_IS_DEVICE_REMOVED(dip));

	(void) i_log_devfs_add_devinfo(dip, 0);
}
4884 * log an event that a dev_info branch has been configured or unconfigured.
4887 i_log_devfs_branch(char *node_path
, char *subclass
)
4892 sysevent_value_t se_val
;
4893 sysevent_attr_list_t
*ev_attr_list
= NULL
;
4894 int no_transport
= 0;
4896 /* do not generate the event during boot */
4897 if (!i_ddi_io_initialized())
4898 return (DDI_SUCCESS
);
4900 /* Invalidate the devinfo snapshot cache */
4901 i_ddi_di_cache_invalidate();
4903 ev
= sysevent_alloc(EC_DEVFS
, subclass
, EP_DDI
, SE_SLEEP
);
4905 se_val
.value_type
= SE_DATA_TYPE_STRING
;
4906 se_val
.value
.sv_string
= node_path
;
4908 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
4909 &se_val
, SE_SLEEP
) != 0) {
4913 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
4914 sysevent_free_attr(ev_attr_list
);
4918 if ((se_err
= log_sysevent(ev
, SE_SLEEP
, &eid
)) != 0) {
4919 if (se_err
== SE_NO_TRANSPORT
)
4925 return (DDI_SUCCESS
);
4928 cmn_err(CE_WARN
, "failed to log %s branch event for %s%s",
4929 subclass
, node_path
,
4930 (no_transport
) ? " (syseventd not responding)" : "");
4933 return (DDI_FAILURE
);
4937 * log an event that a dev_info tree branch has been configured.
4940 i_log_devfs_branch_add(dev_info_t
*dip
)
4945 node_path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
4946 (void) ddi_pathname(dip
, node_path
);
4947 rv
= i_log_devfs_branch(node_path
, ESC_DEVFS_BRANCH_ADD
);
4948 kmem_free(node_path
, MAXPATHLEN
);
4954 * log an event that a dev_info tree branch has been unconfigured.
4957 i_log_devfs_branch_remove(char *node_path
)
4959 return (i_log_devfs_branch(node_path
, ESC_DEVFS_BRANCH_REMOVE
));
/*
 * enqueue the dip's deviname on the branch event queue.
 */
static struct brevq_node *
brevq_enqueue(struct brevq_node **brevqp, dev_info_t *dip,
    struct brevq_node *child)
{
	struct brevq_node *brn;
	char *deviname;

	deviname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	(void) ddi_deviname(dip, deviname);

	brn = kmem_zalloc(sizeof (*brn), KM_SLEEP);
	brn->brn_deviname = i_ddi_strdup(deviname, KM_SLEEP);
	kmem_free(deviname, MAXNAMELEN);
	brn->brn_child = child;
	brn->brn_sibling = *brevqp;
	*brevqp = brn;

	return (brn);
}

/*
 * free the memory allocated for the elements on the branch event queue.
 */
static void
free_brevq(struct brevq_node *brevq)
{
	struct brevq_node *brn, *next_brn;

	for (brn = brevq; brn != NULL; brn = next_brn) {
		next_brn = brn->brn_sibling;
		ASSERT(brn->brn_child == NULL);
		kmem_free(brn->brn_deviname, strlen(brn->brn_deviname) + 1);
		kmem_free(brn, sizeof (*brn));
	}
}

/*
 * log the events queued up on the branch event queue and free the
 * associated memory.
 *
 * node_path must have been allocated with at least MAXPATHLEN bytes.
 */
static void
log_and_free_brevq(char *node_path, struct brevq_node *brevq)
{
	struct brevq_node *brn;
	char *p;

	p = node_path + strlen(node_path);
	for (brn = brevq; brn != NULL; brn = brn->brn_sibling) {
		(void) strcpy(p, brn->brn_deviname);
		(void) i_log_devfs_branch_remove(node_path);
	}
	*p = '\0';

	free_brevq(brevq);
}

/*
 * log the events queued up on the branch event queue and free the
 * associated memory. Same as the previous function but operates on dip.
 */
static void
log_and_free_brevq_dip(dev_info_t *dip, struct brevq_node *brevq)
{
	char *path;

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	log_and_free_brevq(path, brevq);
	kmem_free(path, MAXPATHLEN);
}

/*
 * log the outstanding branch remove events for the grand children of the dip
 * and free the associated memory.
 */
static void
log_and_free_br_events_on_grand_children(dev_info_t *dip,
    struct brevq_node *brevq)
{
	struct brevq_node *brn;
	char *path;
	char *p;

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	p = path + strlen(path);
	for (brn = brevq; brn != NULL; brn = brn->brn_sibling) {
		if (brn->brn_child) {
			(void) strcpy(p, brn->brn_deviname);
			/* now path contains the node path to the dip's child */
			log_and_free_brevq(path, brn->brn_child);
			brn->brn_child = NULL;
		}
	}
	kmem_free(path, MAXPATHLEN);

	free_brevq(brevq);
}
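
/*
 * Illustrative picture of the brevq layout built by the routines above:
 * brn_sibling links queue entries for the children of one dip, while
 * brn_child points at a sub-queue built for that child's own children,
 * mirroring the shape of the device tree branch being unconfigured.
 */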
5065 * log and cleanup branch remove events for the grand children of the dip.
5068 cleanup_br_events_on_grand_children(dev_info_t
*dip
, struct brevq_node
**brevqp
)
5071 struct brevq_node
*brevq
, *brn
, *prev_brn
, *next_brn
;
5075 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
5079 ndi_devi_enter(dip
, &circ
);
5080 for (brn
= brevq
; brn
!= NULL
; brn
= next_brn
) {
5081 next_brn
= brn
->brn_sibling
;
5082 for (child
= ddi_get_child(dip
); child
!= NULL
;
5083 child
= ddi_get_next_sibling(child
)) {
5084 if (i_ddi_node_state(child
) >= DS_INITIALIZED
) {
5085 (void) ddi_deviname(child
, path
);
5086 if (strcmp(path
, brn
->brn_deviname
) == 0)
5091 if (child
!= NULL
&& !(DEVI_EVREMOVE(child
))) {
5093 * Event state is not REMOVE. So branch remove event
5094 * is not going be generated on brn->brn_child.
5095 * If any branch remove events were queued up on
5096 * brn->brn_child log them and remove the brn
5099 if (brn
->brn_child
) {
5100 (void) ddi_pathname(dip
, path
);
5101 (void) strcat(path
, brn
->brn_deviname
);
5102 log_and_free_brevq(path
, brn
->brn_child
);
5106 prev_brn
->brn_sibling
= next_brn
;
5110 kmem_free(brn
->brn_deviname
,
5111 strlen(brn
->brn_deviname
) + 1);
5112 kmem_free(brn
, sizeof (*brn
));
5115 * Free up the outstanding branch remove events
5116 * queued on brn->brn_child since brn->brn_child
5117 * itself is eligible for branch remove event.
5119 if (brn
->brn_child
) {
5120 free_brevq(brn
->brn_child
);
5121 brn
->brn_child
= NULL
;
5127 ndi_devi_exit(dip
, circ
);
5128 kmem_free(path
, MAXPATHLEN
);
static int
need_remove_event(dev_info_t *dip, int flags)
{
	if ((flags & (NDI_NO_EVENT | NDI_AUTODETACH)) == 0 &&
	    (flags & (NDI_DEVI_OFFLINE | NDI_UNCONFIG | NDI_DEVI_REMOVE)) &&
	    !(DEVI_EVREMOVE(dip)))
		return (1);
	else
		return (0);
}

/*
 * Unconfigure children/descendants of the dip.
 *
 * If the operation involves a branch event NDI_BRANCH_EVENT_OP is set
 * throughout the unconfiguration. On successful return *brevqp is set to
 * a queue of dip's child devinames for which branch remove events need
 * to be generated.
 */
static int
devi_unconfig_branch(dev_info_t *dip, dev_info_t **dipp, int flags,
    struct brevq_node **brevqp)
{
	int rval;

	*brevqp = NULL;

	if ((!(flags & NDI_BRANCH_EVENT_OP)) && need_remove_event(dip, flags))
		flags |= NDI_BRANCH_EVENT_OP;

	if (flags & NDI_BRANCH_EVENT_OP) {
		rval = devi_unconfig_common(dip, dipp, flags, DDI_MAJOR_T_NONE,
		    brevqp);

		if (rval != NDI_SUCCESS && (*brevqp)) {
			log_and_free_brevq_dip(dip, *brevqp);
			*brevqp = NULL;
		}
	} else
		rval = devi_unconfig_common(dip, dipp, flags, DDI_MAJOR_T_NONE,
		    NULL);

	return (rval);
}

/*
 * If the dip is already bound to a driver, transition it to DS_INITIALIZED
 * in order to generate an event in the case where the node was left in
 * DS_BOUND state since boot (never got attached) and the node is now
 * being offlined.
 */
static void
init_bound_node_ev(dev_info_t *pdip, dev_info_t *dip, int flags)
{
	if (need_remove_event(dip, flags) &&
	    i_ddi_node_state(dip) == DS_BOUND &&
	    i_ddi_devi_attached(pdip) && !DEVI_IS_DEVICE_OFFLINE(dip))
		(void) ddi_initchild(pdip, dip);
}
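
/*
 * Note: callers of devi_unconfig_branch() (for example the offline and
 * unconfig-by-name paths below) are responsible for logging and freeing
 * any queue returned through *brevqp once the branch operation completes.
 */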
5192 * attach a node/branch with parent already held busy
5195 devi_attach_node(dev_info_t
*dip
, uint_t flags
)
5197 dev_info_t
*pdip
= ddi_get_parent(dip
);
5199 ASSERT(pdip
&& DEVI_BUSY_OWNED(pdip
));
5201 mutex_enter(&(DEVI(dip
)->devi_lock
));
5202 if (flags
& NDI_DEVI_ONLINE
) {
5203 if (!i_ddi_devi_attached(dip
))
5204 DEVI_SET_REPORT(dip
);
5205 DEVI_SET_DEVICE_ONLINE(dip
);
5207 if (DEVI_IS_DEVICE_OFFLINE(dip
)) {
5208 mutex_exit(&(DEVI(dip
)->devi_lock
));
5209 return (NDI_FAILURE
);
5211 mutex_exit(&(DEVI(dip
)->devi_lock
));
5213 if (i_ddi_attachchild(dip
) != DDI_SUCCESS
) {
5214 mutex_enter(&(DEVI(dip
)->devi_lock
));
5215 DEVI_SET_EVUNINIT(dip
);
5216 mutex_exit(&(DEVI(dip
)->devi_lock
));
5218 if (ndi_dev_is_persistent_node(dip
))
5219 (void) ddi_uninitchild(dip
);
5222 * Delete .conf nodes and nodes that are not
5225 (void) ddi_remove_child(dip
, 0);
5227 return (NDI_FAILURE
);
5230 i_ndi_devi_report_status_change(dip
, NULL
);
5233 * log an event, but not during devfs lookups in which case
5234 * NDI_NO_EVENT is set.
5236 if ((flags
& NDI_NO_EVENT
) == 0 && !(DEVI_EVADD(dip
))) {
5237 (void) i_log_devfs_add_devinfo(dip
, flags
);
5239 mutex_enter(&(DEVI(dip
)->devi_lock
));
5240 DEVI_SET_EVADD(dip
);
5241 mutex_exit(&(DEVI(dip
)->devi_lock
));
5242 } else if (!(flags
& NDI_NO_EVENT_STATE_CHNG
)) {
5243 mutex_enter(&(DEVI(dip
)->devi_lock
));
5244 DEVI_SET_EVADD(dip
);
5245 mutex_exit(&(DEVI(dip
)->devi_lock
));
5248 return (NDI_SUCCESS
);
5251 /* internal function to config immediate children */
5253 config_immediate_children(dev_info_t
*pdip
, uint_t flags
, major_t major
)
5255 dev_info_t
*child
, *next
;
5258 ASSERT(i_ddi_devi_attached(pdip
));
5260 if (!NEXUS_DRV(ddi_get_driver(pdip
)))
5261 return (NDI_SUCCESS
);
5263 NDI_CONFIG_DEBUG((CE_CONT
,
5264 "config_immediate_children: %s%d (%p), flags=%x\n",
5265 ddi_driver_name(pdip
), ddi_get_instance(pdip
),
5266 (void *)pdip
, flags
));
5268 ndi_devi_enter(pdip
, &circ
);
5270 if (flags
& NDI_CONFIG_REPROBE
) {
5271 mutex_enter(&DEVI(pdip
)->devi_lock
);
5272 DEVI(pdip
)->devi_flags
&= ~DEVI_MADE_CHILDREN
;
5273 mutex_exit(&DEVI(pdip
)->devi_lock
);
5275 (void) i_ndi_make_spec_children(pdip
, flags
);
5276 i_ndi_init_hw_children(pdip
, flags
);
5278 child
= ddi_get_child(pdip
);
5280 /* NOTE: devi_attach_node() may remove the dip */
5281 next
= ddi_get_next_sibling(child
);
5284 * Configure all nexus nodes or leaf nodes with
5285 * matching driver major
5287 if ((major
== DDI_MAJOR_T_NONE
) ||
5288 (major
== ddi_driver_major(child
)) ||
5289 ((flags
& NDI_CONFIG
) && (is_leaf_node(child
) == 0)))
5290 (void) devi_attach_node(child
, flags
);
5294 ndi_devi_exit(pdip
, circ
);
5296 return (NDI_SUCCESS
);
/* internal function to config grand children */
static int
config_grand_children(dev_info_t *pdip, uint_t flags, major_t major)
{
	struct mt_config_handle *hdl;

	/* multi-threaded configuration of child nexus */
	hdl = mt_config_init(pdip, NULL, flags, major, MT_CONFIG_OP, NULL);
	mt_config_children(hdl);

	return (mt_config_fini(hdl));	/* wait for threads to exit */
}
5313 * Common function for device tree configuration,
5314 * either BUS_CONFIG_ALL or BUS_CONFIG_DRIVER.
5315 * The NDI_CONFIG flag causes recursive configuration of
5316 * grandchildren, devfs usage should not recurse.
5319 devi_config_common(dev_info_t
*dip
, int flags
, major_t major
)
5324 if (!i_ddi_devi_attached(dip
))
5325 return (NDI_FAILURE
);
5327 if (pm_pre_config(dip
, NULL
) != DDI_SUCCESS
)
5328 return (NDI_FAILURE
);
5330 if ((DEVI(dip
)->devi_ops
->devo_bus_ops
== NULL
) ||
5331 (DEVI(dip
)->devi_ops
->devo_bus_ops
->busops_rev
< BUSO_REV_5
) ||
5332 (f
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_config
) == NULL
) {
5333 error
= config_immediate_children(dip
, flags
, major
);
5335 /* call bus_config entry point */
5336 ddi_bus_config_op_t bus_op
= (major
== DDI_MAJOR_T_NONE
) ?
5337 BUS_CONFIG_ALL
: BUS_CONFIG_DRIVER
;
5339 flags
, bus_op
, (void *)(uintptr_t)major
, NULL
, 0);
5343 pm_post_config(dip
, NULL
);
5348 * Some callers, notably SCSI, need to mark the devfs cache
5349 * to be rebuilt together with the config operation.
5351 if (flags
& NDI_DEVFS_CLEAN
)
5352 (void) devfs_clean(dip
, NULL
, 0);
5354 if (flags
& NDI_CONFIG
)
5355 (void) config_grand_children(dip
, flags
, major
);
5357 pm_post_config(dip
, NULL
);
5359 return (NDI_SUCCESS
);
/*
 * Framework entry point for BUS_CONFIG_ALL
 */
int
ndi_devi_config(dev_info_t *dip, int flags)
{
	NDI_CONFIG_DEBUG((CE_CONT,
	    "ndi_devi_config: par = %s%d (%p), flags = 0x%x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	return (devi_config_common(dip, flags, DDI_MAJOR_T_NONE));
}

/*
 * Framework entry point for BUS_CONFIG_DRIVER, bound to major
 */
int
ndi_devi_config_driver(dev_info_t *dip, int flags, major_t major)
{
	/* don't abuse this function */
	ASSERT(major != DDI_MAJOR_T_NONE);

	NDI_CONFIG_DEBUG((CE_CONT,
	    "ndi_devi_config_driver: par = %s%d (%p), flags = 0x%x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	return (devi_config_common(dip, flags, major));
}
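
/*
 * Note (sketch): ndi_devi_config()/ndi_devi_config_driver() are the
 * framework-level equivalents of BUS_CONFIG_ALL and BUS_CONFIG_DRIVER;
 * a nexus without its own bus_config entry point simply falls through to
 * config_immediate_children() via devi_config_common() above.
 */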
5392 * Called by nexus drivers to configure its children.
5395 devi_config_one(dev_info_t
*pdip
, char *devnm
, dev_info_t
**cdipp
,
5396 uint_t flags
, clock_t timeout
)
5398 dev_info_t
*vdip
= NULL
;
5399 char *drivername
= NULL
;
5400 int find_by_addr
= 0;
5403 clock_t end_time
; /* 60 sec */
5406 mdi_pathinfo_t
*cpip
;
5410 if (!NEXUS_DRV(ddi_get_driver(pdip
)))
5411 return (NDI_FAILURE
);
5413 /* split name into "name@addr" parts */
5414 i_ddi_parse_name(devnm
, &name
, &addr
, NULL
);
5417 * If the nexus is a pHCI and we are not processing a pHCI from
5418 * mdi bus_config code then we need to know the vHCI.
5421 vdip
= mdi_devi_get_vdip(pdip
);
5424 * We may have a genericname on a system that creates drivername
5425 * nodes (from .conf files). Find the drivername by nodeid. If we
5426 * can't find a node with devnm as the node name then we search by
5427 * drivername. This allows an implementation to supply a genericly
5428 * named boot path (disk) and locate drivename nodes (sd). The
5429 * NDI_PROMNAME flag does not apply to /devices/pseudo paths.
5431 if ((flags
& NDI_PROMNAME
) && (pdip
!= pseudo_dip
)) {
5432 drivername
= child_path_to_driver(pdip
, name
, addr
);
5437 * Determine end_time: This routine should *not* be called with a
5438 * constant non-zero timeout argument, the caller should be adjusting
5439 * the timeout argument relative to when it *started* its asynchronous
5443 end_time
= ddi_get_lbolt() + timeout
;
5447 * For pHCI, enter (vHCI, pHCI) and search for pathinfo/client
5448 * child - break out of for(;;) loop if child found.
5449 * NOTE: Lock order for ndi_devi_enter is (vHCI, pHCI).
5452 /* use mdi_devi_enter ordering */
5453 ndi_devi_enter(vdip
, &v_circ
);
5454 ndi_devi_enter(pdip
, &p_circ
);
5455 cpip
= mdi_pi_find(pdip
, NULL
, addr
);
5456 cdip
= mdi_pi_get_client(cpip
);
5460 ndi_devi_enter(pdip
, &p_circ
);
5463 * When not a vHCI or not all pHCI devices are required to
5464 * enumerated under the vHCI (NDI_MDI_FALLBACK) search for
5467 if ((vdip
== NULL
) || (flags
& NDI_MDI_FALLBACK
)) {
5468 /* determine if .conf nodes already built */
5469 probed
= (DEVI(pdip
)->devi_flags
& DEVI_MADE_CHILDREN
);
5472 * Search for child by name, if not found then search
5473 * for a node bound to the drivername driver with the
5474 * specified "@addr". Break out of for(;;) loop if
5475 * child found. To support path-oriented aliases
5476 * binding on boot-device, we do a search_by_addr too.
5478 again
: (void) i_ndi_make_spec_children(pdip
, flags
);
5479 cdip
= find_child_by_name(pdip
, name
, addr
);
5480 if ((cdip
== NULL
) && drivername
)
5481 cdip
= find_child_by_driver(pdip
,
5483 if ((cdip
== NULL
) && find_by_addr
)
5484 cdip
= find_child_by_addr(pdip
, addr
);
5489 * determine if we should reenumerate .conf nodes
5490 * and look for child again.
5493 i_ddi_io_initialized() &&
5494 (flags
& NDI_CONFIG_REPROBE
) &&
5495 ((timeout
<= 0) || (ddi_get_lbolt() >= end_time
))) {
5497 mutex_enter(&DEVI(pdip
)->devi_lock
);
5498 DEVI(pdip
)->devi_flags
&= ~DEVI_MADE_CHILDREN
;
5499 mutex_exit(&DEVI(pdip
)->devi_lock
);
5504 /* break out of for(;;) if time expired */
5505 if ((timeout
<= 0) || (ddi_get_lbolt() >= end_time
))
5509 * Child not found, exit and wait for asynchronous enumeration
5510 * to add child (or timeout). The addition of a new child (vhci
5511 * or phci) requires the asynchronous enumeration thread to
5512 * ndi_devi_enter/ndi_devi_exit. This exit will signal devi_cv
5513 * and cause us to return from ndi_devi_exit_and_wait, after
5514 * which we loop and search for the requested child again.
5516 NDI_DEBUG(flags
, (CE_CONT
,
5517 "%s%d: waiting for child %s@%s, timeout %ld",
5518 ddi_driver_name(pdip
), ddi_get_instance(pdip
),
5519 name
, addr
, timeout
));
5522 * Mark vHCI for pHCI ndi_devi_exit broadcast.
5524 mutex_enter(&DEVI(vdip
)->devi_lock
);
5525 DEVI(vdip
)->devi_flags
|=
5526 DEVI_PHCI_SIGNALS_VHCI
;
5527 mutex_exit(&DEVI(vdip
)->devi_lock
);
5528 ndi_devi_exit(pdip
, p_circ
);
5531 * NB: There is a small race window from above
5532 * ndi_devi_exit() of pdip to cv_wait() in
5533 * ndi_devi_exit_and_wait() which can result in
5534 * not immediately finding a new pHCI child
5535 * of a pHCI that uses NDI_MDI_FAILBACK.
5537 ndi_devi_exit_and_wait(vdip
, v_circ
, end_time
);
5539 ndi_devi_exit_and_wait(pdip
, p_circ
, end_time
);
5543 /* done with paddr, fixup i_ddi_parse_name '@'->'\0' change */
5544 if (addr
&& *addr
!= '\0')
5547 /* attach and hold the child, returning pointer to child */
5548 if (cdip
&& (devi_attach_node(cdip
, flags
) == NDI_SUCCESS
)) {
5549 ndi_hold_devi(cdip
);
5553 ndi_devi_exit(pdip
, p_circ
);
5555 ndi_devi_exit(vdip
, v_circ
);
5556 return (*cdipp
? NDI_SUCCESS
: NDI_FAILURE
);
5560 * Enumerate and attach a child specified by name 'devnm'.
5561 * Called by devfs lookup and DR to perform a BUS_CONFIG_ONE.
5562 * Note: devfs does not make use of NDI_CONFIG to configure
5566 ndi_devi_config_one(dev_info_t
*pdip
, char *devnm
, dev_info_t
**dipp
, int flags
)
5572 int branch_event
= 0;
5577 ASSERT(i_ddi_devi_attached(pdip
));
5579 NDI_CONFIG_DEBUG((CE_CONT
,
5580 "ndi_devi_config_one: par = %s%d (%p), child = %s\n",
5581 ddi_driver_name(pdip
), ddi_get_instance(pdip
),
5582 (void *)pdip
, devnm
));
5586 if (pm_pre_config(pdip
, devnm
) != DDI_SUCCESS
) {
5587 cmn_err(CE_WARN
, "preconfig failed: %s", devnm
);
5588 return (NDI_FAILURE
);
5591 if ((flags
& (NDI_NO_EVENT
| NDI_BRANCH_EVENT_OP
)) == 0 &&
5592 (flags
& NDI_CONFIG
)) {
5593 flags
|= NDI_BRANCH_EVENT_OP
;
5597 nmdup
= strdup(devnm
);
5598 duplen
= strlen(devnm
) + 1;
5600 if ((DEVI(pdip
)->devi_ops
->devo_bus_ops
== NULL
) ||
5601 (DEVI(pdip
)->devi_ops
->devo_bus_ops
->busops_rev
< BUSO_REV_5
) ||
5602 (f
= DEVI(pdip
)->devi_ops
->devo_bus_ops
->bus_config
) == NULL
) {
5603 error
= devi_config_one(pdip
, devnm
, dipp
, flags
, 0);
5605 /* call bus_config entry point */
5606 error
= (*f
)(pdip
, flags
, BUS_CONFIG_ONE
, (void *)devnm
, dipp
);
5614 * if we fail to lookup and this could be an alias, lookup currdip
5615 * To prevent recursive lookups into the same hash table, only
5616 * do the currdip lookups once the hash table init is complete.
5617 * Use tsd so that redirection doesn't recurse
5620 char *alias
= kmem_alloc(MAXPATHLEN
, KM_NOSLEEP
);
5621 if (alias
== NULL
) {
5622 ddi_err(DER_PANIC
, pdip
, "alias alloc failed: %s",
5625 (void) ddi_pathname(pdip
, alias
);
5626 (void) strlcat(alias
, "/", MAXPATHLEN
);
5627 (void) strlcat(alias
, nmdup
, MAXPATHLEN
);
5629 *dipp
= ddi_alias_redirect(alias
);
5630 error
= (*dipp
? NDI_SUCCESS
: NDI_FAILURE
);
5632 kmem_free(alias
, MAXPATHLEN
);
5634 kmem_free(nmdup
, duplen
);
5636 if (error
|| !(flags
& NDI_CONFIG
)) {
5637 pm_post_config(pdip
, devnm
);
5642 * DR usage (i.e. call with NDI_CONFIG) recursively configures
5643 * grandchildren, performing a BUS_CONFIG_ALL from the node attached
5644 * by the BUS_CONFIG_ONE.
5647 error
= devi_config_common(*dipp
, flags
, DDI_MAJOR_T_NONE
);
5649 pm_post_config(pdip
, devnm
);
5652 (void) i_log_devfs_branch_add(*dipp
);
5658 * Enumerate and attach a child specified by name 'devnm'.
5659 * Called during configure the OBP options. This configures
5663 ndi_devi_config_obp_args(dev_info_t
*parent
, char *devnm
,
5664 dev_info_t
**childp
, int flags
)
5670 ASSERT(i_ddi_devi_attached(parent
));
5672 NDI_CONFIG_DEBUG((CE_CONT
, "ndi_devi_config_obp_args: "
5673 "par = %s%d (%p), child = %s\n", ddi_driver_name(parent
),
5674 ddi_get_instance(parent
), (void *)parent
, devnm
));
5676 if ((DEVI(parent
)->devi_ops
->devo_bus_ops
== NULL
) ||
5677 (DEVI(parent
)->devi_ops
->devo_bus_ops
->busops_rev
< BUSO_REV_5
) ||
5678 (f
= DEVI(parent
)->devi_ops
->devo_bus_ops
->bus_config
) == NULL
) {
5679 error
= NDI_FAILURE
;
5681 /* call bus_config entry point */
5682 error
= (*f
)(parent
, flags
,
5683 BUS_CONFIG_OBP_ARGS
, (void *)devnm
, childp
);
5689 * Pay attention, the following is a bit tricky:
5690 * There are three possible cases when constraints are applied
5692 * - A constraint is applied and the offline is disallowed.
5693 * Simply return failure and block the offline
5695 * - A constraint is applied and the offline is allowed.
5696 * Mark the dip as having passed the constraint and allow
5697 * offline to proceed.
5699 * - A constraint is not applied. Allow the offline to proceed for now.
5701 * In the latter two cases we allow the offline to proceed. If the
5702 * offline succeeds (no users) everything is fine. It is ok for an unused
5703 * device to be offlined even if no constraints were imposed on the offline.
5704 * If the offline fails because there are users, we look at the constraint
5705 * flag on the dip. If the constraint flag is set (implying that it passed
5706 * a constraint) we allow the dip to be retired. If not, we don't allow
5707 * the retire. This ensures that we don't allow unconstrained retire.
5710 e_ddi_offline_notify(dev_info_t
*dip
)
5716 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): entered: dip=%p",
5723 * Start with userland constraints first - applied via device contracts
5725 retval
= contract_device_offline(dip
, DDI_DEV_T_ANY
, 0);
5728 RIO_DEBUG((CE_NOTE
, "Received NACK for dip=%p", (void *)dip
));
5733 RIO_DEBUG((CE_NOTE
, "Received ACK for dip=%p", (void *)dip
));
5737 RIO_DEBUG((CE_NOTE
, "No contracts on dip=%p", (void *)dip
));
5740 ASSERT(retval
== CT_NONE
);
5744 * Next, use LDI to impose kernel constraints
5746 retval
= ldi_invoke_notify(dip
, DDI_DEV_T_ANY
, 0, LDI_EV_OFFLINE
, NULL
);
5748 case LDI_EV_FAILURE
:
5749 contract_device_negend(dip
, DDI_DEV_T_ANY
, 0, CT_EV_FAILURE
);
5750 RIO_DEBUG((CE_NOTE
, "LDI callback failed on dip=%p",
5754 case LDI_EV_SUCCESS
:
5756 RIO_DEBUG((CE_NOTE
, "LDI callback success on dip=%p",
5760 /* no matching LDI callbacks */
5761 RIO_DEBUG((CE_NOTE
, "No LDI callbacks for dip=%p",
5765 ASSERT(retval
== LDI_EV_NONE
);
5769 mutex_enter(&(DEVI(dip
)->devi_lock
));
5770 if ((DEVI(dip
)->devi_flags
& DEVI_RETIRING
) && failure
) {
5771 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): setting "
5772 "BLOCKED flag. dip=%p", (void *)dip
));
5773 DEVI(dip
)->devi_flags
|= DEVI_R_BLOCKED
;
5774 if (DEVI(dip
)->devi_flags
& DEVI_R_CONSTRAINT
) {
5775 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): "
5776 "blocked. clearing RCM CONSTRAINT flag. dip=%p",
5778 DEVI(dip
)->devi_flags
&= ~DEVI_R_CONSTRAINT
;
5780 } else if ((DEVI(dip
)->devi_flags
& DEVI_RETIRING
) && constraint
) {
5781 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): setting "
5782 "CONSTRAINT flag. dip=%p", (void *)dip
));
5783 DEVI(dip
)->devi_flags
|= DEVI_R_CONSTRAINT
;
5784 } else if ((DEVI(dip
)->devi_flags
& DEVI_RETIRING
) &&
5785 ((DEVI(dip
)->devi_ops
!= NULL
&&
5786 DEVI(dip
)->devi_ops
->devo_bus_ops
!= NULL
) ||
5787 DEVI(dip
)->devi_ref
== 0)) {
5788 /* also allow retire if nexus or if device is not in use */
5789 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): device not in "
5790 "use. Setting CONSTRAINT flag. dip=%p", (void *)dip
));
5791 DEVI(dip
)->devi_flags
|= DEVI_R_CONSTRAINT
;
5794 * Note: We cannot ASSERT here that DEVI_R_CONSTRAINT is
5795 * not set, since other sources (such as RCM) may have
5798 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): not setting "
5799 "constraint flag. dip=%p", (void *)dip
));
5801 mutex_exit(&(DEVI(dip
)->devi_lock
));
5804 RIO_VERBOSE((CE_NOTE
, "e_ddi_offline_notify(): exit: dip=%p",
5807 return (failure
? DDI_FAILURE
: DDI_SUCCESS
);
void
e_ddi_offline_finalize(dev_info_t *dip, int result)
{
	RIO_DEBUG((CE_NOTE, "e_ddi_offline_finalize(): entry: result=%s, "
	    "dip=%p", result == DDI_SUCCESS ? "SUCCESS" : "FAILURE",
	    (void *)dip));

	contract_device_negend(dip, DDI_DEV_T_ANY, 0, result == DDI_SUCCESS ?
	    CT_EV_SUCCESS : CT_EV_FAILURE);

	ldi_invoke_finalize(dip, DDI_DEV_T_ANY, 0,
	    LDI_EV_OFFLINE, result == DDI_SUCCESS ?
	    LDI_EV_SUCCESS : LDI_EV_FAILURE, NULL);

	RIO_VERBOSE((CE_NOTE, "e_ddi_offline_finalize(): exit: dip=%p",
	    (void *)dip));
}

void
e_ddi_degrade_finalize(dev_info_t *dip)
{
	RIO_DEBUG((CE_NOTE, "e_ddi_degrade_finalize(): entry: "
	    "result always = DDI_SUCCESS, dip=%p", (void *)dip));

	contract_device_degrade(dip, DDI_DEV_T_ANY, 0);
	contract_device_negend(dip, DDI_DEV_T_ANY, 0, CT_EV_SUCCESS);

	ldi_invoke_finalize(dip, DDI_DEV_T_ANY, 0, LDI_EV_DEGRADE,
	    LDI_EV_SUCCESS, NULL);

	RIO_VERBOSE((CE_NOTE, "e_ddi_degrade_finalize(): exit: dip=%p",
	    (void *)dip));
}

void
e_ddi_undegrade_finalize(dev_info_t *dip)
{
	RIO_DEBUG((CE_NOTE, "e_ddi_undegrade_finalize(): entry: "
	    "result always = DDI_SUCCESS, dip=%p", (void *)dip));

	contract_device_undegrade(dip, DDI_DEV_T_ANY, 0);
	contract_device_negend(dip, DDI_DEV_T_ANY, 0, CT_EV_SUCCESS);

	RIO_VERBOSE((CE_NOTE, "e_ddi_undegrade_finalize(): exit: dip=%p",
	    (void *)dip));
}
5858 * detach a node with parent already held busy
5861 devi_detach_node(dev_info_t
*dip
, uint_t flags
)
5863 dev_info_t
*pdip
= ddi_get_parent(dip
);
5864 int ret
= NDI_SUCCESS
;
5865 ddi_eventcookie_t cookie
;
5868 char *driver
= NULL
;
5872 ASSERT(pdip
&& DEVI_BUSY_OWNED(pdip
));
5875 * Invoke notify if offlining
5877 if (flags
& NDI_DEVI_OFFLINE
) {
5878 RIO_DEBUG((CE_NOTE
, "devi_detach_node: offlining dip=%p",
5880 if (e_ddi_offline_notify(dip
) != DDI_SUCCESS
) {
5881 RIO_DEBUG((CE_NOTE
, "devi_detach_node: offline NACKed"
5882 "dip=%p", (void *)dip
));
5883 return (NDI_FAILURE
);
5887 if (flags
& NDI_POST_EVENT
) {
5888 if (i_ddi_devi_attached(pdip
)) {
5889 if (ddi_get_eventcookie(dip
, DDI_DEVI_REMOVE_EVENT
,
5890 &cookie
) == NDI_SUCCESS
)
5891 (void) ndi_post_event(dip
, dip
, cookie
, NULL
);
5896 * dv_mknod places a hold on the dev_info_t for each devfs node
5897 * created. If we're to succeed in detaching this device, we must
5898 * first release all outstanding references held by devfs.
5900 (void) devfs_clean(pdip
, NULL
, DV_CLEAN_FORCE
);
5902 if (i_ddi_detachchild(dip
, flags
) != DDI_SUCCESS
) {
5903 if (flags
& NDI_DEVI_OFFLINE
) {
5904 RIO_DEBUG((CE_NOTE
, "devi_detach_node: offline failed."
5905 " Calling e_ddi_offline_finalize with result=%d. "
5906 "dip=%p", DDI_FAILURE
, (void *)dip
));
5907 e_ddi_offline_finalize(dip
, DDI_FAILURE
);
5909 return (NDI_FAILURE
);
5912 if (flags
& NDI_DEVI_OFFLINE
) {
5913 RIO_DEBUG((CE_NOTE
, "devi_detach_node: offline succeeded."
5914 " Calling e_ddi_offline_finalize with result=%d, "
5915 "dip=%p", DDI_SUCCESS
, (void *)dip
));
5916 e_ddi_offline_finalize(dip
, DDI_SUCCESS
);
5919 if (flags
& NDI_AUTODETACH
)
5920 return (NDI_SUCCESS
);
5923 * For DR, even bound nodes may need to have offline
5926 if (flags
& NDI_DEVI_OFFLINE
) {
5927 mutex_enter(&(DEVI(dip
)->devi_lock
));
5928 DEVI_SET_DEVICE_OFFLINE(dip
);
5929 mutex_exit(&(DEVI(dip
)->devi_lock
));
5932 if (i_ddi_node_state(dip
) == DS_INITIALIZED
) {
5933 struct dev_info
*devi
= DEVI(dip
);
5935 if (devi
->devi_ev_path
== NULL
) {
5936 devi
->devi_ev_path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
5937 (void) ddi_pathname(dip
, devi
->devi_ev_path
);
5939 if (flags
& NDI_DEVI_OFFLINE
)
5940 i_ndi_devi_report_status_change(dip
,
5941 devi
->devi_ev_path
);
5943 if (need_remove_event(dip
, flags
)) {
5945 * instance and path data are lost in call to
5948 devi
->devi_ev_instance
= ddi_get_instance(dip
);
5950 mutex_enter(&(DEVI(dip
)->devi_lock
));
5951 DEVI_SET_EVREMOVE(dip
);
5952 mutex_exit(&(DEVI(dip
)->devi_lock
));
5956 if (flags
& (NDI_UNCONFIG
| NDI_DEVI_REMOVE
)) {
5957 ret
= ddi_uninitchild(dip
);
5958 if (ret
== NDI_SUCCESS
) {
5960 * Remove uninitialized pseudo nodes because
5961 * system props are lost and the node cannot be
5964 if (!ndi_dev_is_persistent_node(dip
))
5965 flags
|= NDI_DEVI_REMOVE
;
5967 if (flags
& NDI_DEVI_REMOVE
) {
5969 * NOTE: If there is a consumer of LDI events,
5970 * ddi_uninitchild above would have failed
5971 * because of active devi_ref from ldi_open().
5974 if (DEVI_EVREMOVE(dip
)) {
5975 path
= i_ddi_strdup(
5976 DEVI(dip
)->devi_ev_path
,
5979 i_ddi_strdup(i_ddi_devi_class(dip
),
5983 (char *)ddi_driver_name(dip
),
5985 instance
= DEVI(dip
)->devi_ev_instance
;
5989 ret
= ddi_remove_child(dip
, 0);
5990 if (post_event
&& ret
== NDI_SUCCESS
) {
5991 /* Generate EC_DEVFS_DEVI_REMOVE */
5992 (void) i_log_devfs_remove_devinfo(path
,
5993 class, driver
, instance
, flags
);
6011 * unconfigure immediate children of bus nexus device
6014 unconfig_immediate_children(
6020 int rv
= NDI_SUCCESS
;
6023 dev_info_t
*vdip
= NULL
;
6026 ASSERT(dipp
== NULL
|| *dipp
== NULL
);
6029 * Scan forward to see if we will be processing a pHCI child. If we
6030 * have a child that is a pHCI and vHCI and pHCI are not siblings then
6031 * enter vHCI before parent(pHCI) to prevent deadlock with mpxio
6032 * Client power management operations.
6034 ndi_devi_enter(dip
, &circ
);
6035 for (child
= ddi_get_child(dip
); child
;
6036 child
= ddi_get_next_sibling(child
)) {
6037 /* skip same nodes we skip below */
6038 if (((major
!= DDI_MAJOR_T_NONE
) &&
6039 (major
!= ddi_driver_major(child
))) ||
6040 ((flags
& NDI_AUTODETACH
) && !is_leaf_node(child
)))
6043 if (MDI_PHCI(child
)) {
6044 vdip
= mdi_devi_get_vdip(child
);
6046 * If vHCI and vHCI is not a sibling of pHCI
6047 * then enter in (vHCI, parent(pHCI)) order.
6049 if (vdip
&& (ddi_get_parent(vdip
) != dip
)) {
6050 ndi_devi_exit(dip
, circ
);
6052 /* use mdi_devi_enter ordering */
6053 ndi_devi_enter(vdip
, &vcirc
);
6054 ndi_devi_enter(dip
, &circ
);
6061 child
= ddi_get_child(dip
);
6063 next
= ddi_get_next_sibling(child
);
6065 if ((major
!= DDI_MAJOR_T_NONE
) &&
6066 (major
!= ddi_driver_major(child
))) {
6071 /* skip nexus nodes during autodetach */
6072 if ((flags
& NDI_AUTODETACH
) && !is_leaf_node(child
)) {
6077 if (devi_detach_node(child
, flags
) != NDI_SUCCESS
) {
6078 if (dipp
&& *dipp
== NULL
) {
6079 ndi_hold_devi(child
);
6086 * Continue upon failure--best effort algorithm
6091 ndi_devi_exit(dip
, circ
);
6093 ndi_devi_exit(vdip
, vcirc
);
6099 * unconfigure grand children of bus nexus device
6102 unconfig_grand_children(
6107 struct brevq_node
**brevqp
)
6109 struct mt_config_handle
*hdl
;
6114 /* multi-threaded configuration of child nexus */
6115 hdl
= mt_config_init(dip
, dipp
, flags
, major
, MT_UNCONFIG_OP
, brevqp
);
6116 mt_config_children(hdl
);
6118 return (mt_config_fini(hdl
)); /* wait for threads to exit */
6122 * Unconfigure children/descendants of the dip.
6124 * If brevqp is not NULL, on return *brevqp is set to a queue of dip's
6125 * child devinames for which branch remove events need to be generated.
6128 devi_unconfig_common(
6133 struct brevq_node
**brevqp
)
6138 ddi_bus_config_op_t bus_op
;
6146 * Power up the dip if it is powered off. If the flag bit
6147 * NDI_AUTODETACH is set and the dip is not at its full power,
6148 * skip the rest of the branch.
6150 if (pm_pre_unconfig(dip
, flags
, &pm_cookie
, NULL
) != DDI_SUCCESS
)
6151 return ((flags
& NDI_AUTODETACH
) ? NDI_SUCCESS
:
6155 * Some callers, notably SCSI, need to clear out the devfs
6156 * cache together with the unconfig to prevent stale entries.
6158 if (flags
& NDI_DEVFS_CLEAN
)
6159 (void) devfs_clean(dip
, NULL
, 0);
6161 rv
= unconfig_grand_children(dip
, dipp
, flags
, major
, brevqp
);
6163 if ((rv
!= NDI_SUCCESS
) && ((flags
& NDI_AUTODETACH
) == 0)) {
6164 if (brevqp
&& *brevqp
) {
6165 log_and_free_br_events_on_grand_children(dip
, *brevqp
);
6166 free_brevq(*brevqp
);
6169 pm_post_unconfig(dip
, pm_cookie
, NULL
);
6173 if (dipp
&& *dipp
) {
6174 ndi_rele_devi(*dipp
);
6179 * It is possible to have a detached nexus with children
6180 * and grandchildren (for example: a branch consisting
6181 * entirely of bound nodes.) Since the nexus is detached
6182 * the bus_unconfig entry point cannot be used to remove
6183 * or unconfigure the descendants.
6185 if (!i_ddi_devi_attached(dip
) ||
6186 (DEVI(dip
)->devi_ops
->devo_bus_ops
== NULL
) ||
6187 (DEVI(dip
)->devi_ops
->devo_bus_ops
->busops_rev
< BUSO_REV_5
) ||
6188 (f
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_unconfig
) == NULL
) {
6189 rv
= unconfig_immediate_children(dip
, dipp
, flags
, major
);
6192 * call bus_unconfig entry point
6193 * It should reset nexus flags if unconfigure succeeds.
6195 bus_op
= (major
== DDI_MAJOR_T_NONE
) ?
6196 BUS_UNCONFIG_ALL
: BUS_UNCONFIG_DRIVER
;
6197 rv
= (*f
)(dip
, flags
, bus_op
, (void *)(uintptr_t)major
);
6200 pm_post_unconfig(dip
, pm_cookie
, NULL
);
6202 if (brevqp
&& *brevqp
)
6203 cleanup_br_events_on_grand_children(dip
, brevqp
);
/*
 * called by devfs/framework to unconfigure children bound to major
 * If NDI_AUTODETACH is specified, this is invoked by either the
 * moduninstall daemon or the modunload -i 0 command.
 */
int
ndi_devi_unconfig_driver(dev_info_t *dip, int flags, major_t major)
{
	NDI_CONFIG_DEBUG((CE_CONT,
	    "ndi_devi_unconfig_driver: par = %s%d (%p), flags = 0x%x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	return (devi_unconfig_common(dip, NULL, flags, major, NULL));
}

int
ndi_devi_unconfig(dev_info_t *dip, int flags)
{
	NDI_CONFIG_DEBUG((CE_CONT,
	    "ndi_devi_unconfig: par = %s%d (%p), flags = 0x%x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	return (devi_unconfig_common(dip, NULL, flags, DDI_MAJOR_T_NONE, NULL));
}

int
e_ddi_devi_unconfig(dev_info_t *dip, dev_info_t **dipp, int flags)
{
	NDI_CONFIG_DEBUG((CE_CONT,
	    "e_ddi_devi_unconfig: par = %s%d (%p), flags = 0x%x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	return (devi_unconfig_common(dip, dipp, flags, DDI_MAJOR_T_NONE, NULL));
}
6244 * Unconfigure child by name
6247 devi_unconfig_one(dev_info_t
*pdip
, char *devnm
, int flags
)
6251 dev_info_t
*vdip
= NULL
;
6254 ndi_devi_enter(pdip
, &circ
);
6255 child
= ndi_devi_findchild(pdip
, devnm
);
6258 * If child is pHCI and vHCI and pHCI are not siblings then enter vHCI
6259 * before parent(pHCI) to avoid deadlock with mpxio Client power
6260 * management operations.
6262 if (child
&& MDI_PHCI(child
)) {
6263 vdip
= mdi_devi_get_vdip(child
);
6264 if (vdip
&& (ddi_get_parent(vdip
) != pdip
)) {
6265 ndi_devi_exit(pdip
, circ
);
6267 /* use mdi_devi_enter ordering */
6268 ndi_devi_enter(vdip
, &v_circ
);
6269 ndi_devi_enter(pdip
, &circ
);
6270 child
= ndi_devi_findchild(pdip
, devnm
);
6276 rv
= devi_detach_node(child
, flags
);
6278 NDI_CONFIG_DEBUG((CE_CONT
,
6279 "devi_unconfig_one: %s not found\n", devnm
));
6283 ndi_devi_exit(pdip
, circ
);
6285 ndi_devi_exit(vdip
, v_circ
);
6291 ndi_devi_unconfig_one(
6301 dev_info_t
*vdip
= NULL
;
6303 struct brevq_node
*brevq
= NULL
;
6305 ASSERT(i_ddi_devi_attached(pdip
));
6307 NDI_CONFIG_DEBUG((CE_CONT
,
6308 "ndi_devi_unconfig_one: par = %s%d (%p), child = %s\n",
6309 ddi_driver_name(pdip
), ddi_get_instance(pdip
),
6310 (void *)pdip
, devnm
));
6312 if (pm_pre_unconfig(pdip
, flags
, &pm_cookie
, devnm
) != DDI_SUCCESS
)
6313 return (NDI_FAILURE
);
6318 ndi_devi_enter(pdip
, &circ
);
6319 child
= ndi_devi_findchild(pdip
, devnm
);
6322 * If child is pHCI and vHCI and pHCI are not siblings then enter vHCI
6323 * before parent(pHCI) to avoid deadlock with mpxio Client power
6324 * management operations.
6326 if (child
&& MDI_PHCI(child
)) {
6327 vdip
= mdi_devi_get_vdip(child
);
6328 if (vdip
&& (ddi_get_parent(vdip
) != pdip
)) {
6329 ndi_devi_exit(pdip
, circ
);
6331 /* use mdi_devi_enter ordering */
6332 ndi_devi_enter(vdip
, &v_circ
);
6333 ndi_devi_enter(pdip
, &circ
);
6334 child
= ndi_devi_findchild(pdip
, devnm
);
6339 if (child
== NULL
) {
6340 NDI_CONFIG_DEBUG((CE_CONT
, "ndi_devi_unconfig_one: %s"
6341 " not found\n", devnm
));
6347 * Unconfigure children/descendants of named child
6349 rv
= devi_unconfig_branch(child
, dipp
, flags
| NDI_UNCONFIG
, &brevq
);
6350 if (rv
!= NDI_SUCCESS
)
6353 init_bound_node_ev(pdip
, child
, flags
);
6355 if ((DEVI(pdip
)->devi_ops
->devo_bus_ops
== NULL
) ||
6356 (DEVI(pdip
)->devi_ops
->devo_bus_ops
->busops_rev
< BUSO_REV_5
) ||
6357 (f
= DEVI(pdip
)->devi_ops
->devo_bus_ops
->bus_unconfig
) == NULL
) {
6358 rv
= devi_detach_node(child
, flags
);
6360 /* call bus_config entry point */
6361 rv
= (*f
)(pdip
, flags
, BUS_UNCONFIG_ONE
, (void *)devnm
);
6365 if (rv
!= NDI_SUCCESS
)
6366 log_and_free_brevq_dip(child
, brevq
);
6371 if (dipp
&& rv
!= NDI_SUCCESS
) {
6372 ndi_hold_devi(child
);
6373 ASSERT(*dipp
== NULL
);
6378 ndi_devi_exit(pdip
, circ
);
6380 ndi_devi_exit(vdip
, v_circ
);
6382 pm_post_unconfig(pdip
, pm_cookie
, devnm
);
6393 * Common async handler for:
6394 * ndi_devi_bind_driver_async
6395 * ndi_devi_online_async
6398 i_ndi_devi_async_common(dev_info_t
*dip
, uint_t flags
, void (*func
)())
6402 struct async_arg
*arg
;
6403 dev_info_t
*pdip
= ddi_get_parent(dip
);
6406 ASSERT(DEVI(pdip
)->devi_taskq
);
6407 ASSERT(ndi_dev_is_persistent_node(dip
));
6409 if (flags
& NDI_NOSLEEP
) {
6410 kmflag
= KM_NOSLEEP
;
6411 tqflag
= TQ_NOSLEEP
;
6417 arg
= kmem_alloc(sizeof (*arg
), kmflag
);
6423 if (ddi_taskq_dispatch(DEVI(pdip
)->devi_taskq
, func
, arg
, tqflag
) ==
6425 return (NDI_SUCCESS
);
6429 NDI_CONFIG_DEBUG((CE_CONT
, "%s%d: ddi_taskq_dispatch failed",
6430 ddi_driver_name(pdip
), ddi_get_instance(pdip
)));
6433 kmem_free(arg
, sizeof (*arg
));
6434 return (NDI_FAILURE
);
6438 i_ndi_devi_bind_driver_cb(struct async_arg
*arg
)
6440 (void) ndi_devi_bind_driver(arg
->dip
, arg
->flags
);
6441 kmem_free(arg
, sizeof (*arg
));
6445 ndi_devi_bind_driver_async(dev_info_t
*dip
, uint_t flags
)
6447 return (i_ndi_devi_async_common(dip
, flags
,
6448 (void (*)())i_ndi_devi_bind_driver_cb
));
6452 * place the devinfo in the ONLINE state.
6455 ndi_devi_online(dev_info_t
*dip
, uint_t flags
)
6458 dev_info_t
*pdip
= ddi_get_parent(dip
);
6459 int branch_event
= 0;
6463 NDI_CONFIG_DEBUG((CE_CONT
, "ndi_devi_online: %s%d (%p)\n",
6464 ddi_driver_name(dip
), ddi_get_instance(dip
), (void *)dip
));
6466 ndi_devi_enter(pdip
, &circ
);
6467 /* bind child before merging .conf nodes */
6468 rv
= i_ndi_config_node(dip
, DS_BOUND
, flags
);
6469 if (rv
!= NDI_SUCCESS
) {
6470 ndi_devi_exit(pdip
, circ
);
6474 /* merge .conf properties */
6475 (void) i_ndi_make_spec_children(pdip
, flags
);
6477 flags
|= (NDI_DEVI_ONLINE
| NDI_CONFIG
);
6479 if (flags
& NDI_NO_EVENT
) {
6481 * Caller is specifically asking for not to generate an event.
6482 * Set the following flag so that devi_attach_node() don't
6483 * change the event state.
6485 flags
|= NDI_NO_EVENT_STATE_CHNG
;
6488 if ((flags
& (NDI_NO_EVENT
| NDI_BRANCH_EVENT_OP
)) == 0 &&
6489 ((flags
& NDI_CONFIG
) || DEVI_NEED_NDI_CONFIG(dip
))) {
6490 flags
|= NDI_BRANCH_EVENT_OP
;
6495 * devi_attach_node() may remove dip on failure
6497 if ((rv
= devi_attach_node(dip
, flags
)) == NDI_SUCCESS
) {
6498 if ((flags
& NDI_CONFIG
) || DEVI_NEED_NDI_CONFIG(dip
)) {
6500 * Hold the attached dip, and exit the parent while
6501 * we drive configuration of children below the
6505 ndi_devi_exit(pdip
, circ
);
6507 (void) ndi_devi_config(dip
, flags
);
6509 ndi_devi_enter(pdip
, &circ
);
6514 (void) i_log_devfs_branch_add(dip
);
6517 ndi_devi_exit(pdip
, circ
);
6520 * Notify devfs that we have a new node. Devfs needs to invalidate
6521 * cached directory contents.
6523 * For PCMCIA devices, it is possible the pdip is not fully
6524 * attached. In this case, calling back into devfs will
6525 * result in a loop or assertion error. Hence, the check
6528 * If we own parent lock, this is part of a branch operation.
6529 * We skip the devfs_clean() step because the cache invalidation
6530 * is done higher up in the device tree.
6532 if (rv
== NDI_SUCCESS
&& i_ddi_devi_attached(pdip
) &&
6533 !DEVI_BUSY_OWNED(pdip
))
6534 (void) devfs_clean(pdip
, NULL
, 0);
6539 i_ndi_devi_online_cb(struct async_arg
*arg
)
6541 (void) ndi_devi_online(arg
->dip
, arg
->flags
);
6542 kmem_free(arg
, sizeof (*arg
));
6546 ndi_devi_online_async(dev_info_t
*dip
, uint_t flags
)
6548 /* mark child as need config if requested. */
6549 if (flags
& NDI_CONFIG
) {
6550 mutex_enter(&(DEVI(dip
)->devi_lock
));
6551 DEVI_SET_NDI_CONFIG(dip
);
6552 mutex_exit(&(DEVI(dip
)->devi_lock
));
6555 return (i_ndi_devi_async_common(dip
, flags
,
6556 (void (*)())i_ndi_devi_online_cb
));
6560 * Take a device node Offline
6561 * To take a device Offline means to detach the device instance from
6562 * the driver and prevent devfs requests from re-attaching the device
6565 * The flag NDI_DEVI_REMOVE causes removes the device node from
6566 * the driver list and the device tree. In this case, the device
6567 * is assumed to be removed from the system.
6570 ndi_devi_offline(dev_info_t
*dip
, uint_t flags
)
6573 dev_info_t
*pdip
= ddi_get_parent(dip
);
6574 dev_info_t
*vdip
= NULL
;
6576 struct brevq_node
*brevq
= NULL
;
6580 flags
|= NDI_DEVI_OFFLINE
;
6583 * If child is pHCI and vHCI and pHCI are not siblings then enter vHCI
6584 * before parent(pHCI) to avoid deadlock with mpxio Client power
6585 * management operations.
6587 if (MDI_PHCI(dip
)) {
6588 vdip
= mdi_devi_get_vdip(dip
);
6589 if (vdip
&& (ddi_get_parent(vdip
) != pdip
))
6590 ndi_devi_enter(vdip
, &v_circ
);
6594 ndi_devi_enter(pdip
, &circ
);
6596 if (i_ddi_devi_attached(dip
)) {
6598 * If dip is in DS_READY state, there may be cached dv_nodes
6599 * referencing this dip, so we invoke devfs code path.
6600 * Note that we must release busy changing on pdip to
6601 * avoid deadlock against devfs.
6603 char *devname
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
6604 (void) ddi_deviname(dip
, devname
);
6606 ndi_devi_exit(pdip
, circ
);
6608 ndi_devi_exit(vdip
, v_circ
);
6611 * If we are explictly told to clean, then clean. If we own the
6612 * parent lock then this is part of a branch operation, and we
6613 * skip the devfs_clean() step.
6615 * NOTE: A thread performing a devfs file system lookup/
6616 * bus_config can't call devfs_clean to unconfig without
6617 * causing rwlock problems in devfs. For ndi_devi_offline, this
6618 * means that the NDI_DEVFS_CLEAN flag is safe from ioctl code
6619 * or from an async hotplug thread, but is not safe from a
6620 * nexus driver's bus_config implementation.
6622 if ((flags
& NDI_DEVFS_CLEAN
) ||
6623 (!DEVI_BUSY_OWNED(pdip
)))
6624 (void) devfs_clean(pdip
, devname
+ 1, DV_CLEAN_FORCE
);
6626 kmem_free(devname
, MAXNAMELEN
+ 1);
6628 rval
= devi_unconfig_branch(dip
, NULL
, flags
|NDI_UNCONFIG
,
6632 return (NDI_FAILURE
);
6635 ndi_devi_enter(vdip
, &v_circ
);
6636 ndi_devi_enter(pdip
, &circ
);
6639 init_bound_node_ev(pdip
, dip
, flags
);
6641 rval
= devi_detach_node(dip
, flags
);
6643 if (rval
!= NDI_SUCCESS
)
6644 log_and_free_brevq_dip(dip
, brevq
);
6649 ndi_devi_exit(pdip
, circ
);
6651 ndi_devi_exit(vdip
, v_circ
);
/*
 * Find the child dev_info node of parent nexus 'p' whose unit address
 * matches "cname@caddr".  Recommend use of ndi_devi_findchild() instead.
 */
dev_info_t *
ndi_devi_find(dev_info_t *pdip, char *cname, char *caddr)
{
	dev_info_t	*child;
	int		circ;

	if (pdip == NULL || cname == NULL || caddr == NULL)
		return ((dev_info_t *)NULL);

	ndi_devi_enter(pdip, &circ);
	child = find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_NODENAME, NULL);
	ndi_devi_exit(pdip, circ);
	return (child);
}

/*
 * Find the child dev_info node of parent nexus 'p' whose unit address
 * matches devname "name@addr".  Permits caller to hold the parent.
 */
dev_info_t *
ndi_devi_findchild(dev_info_t *pdip, char *devname)
{
	dev_info_t	*child;
	char		*cname, *caddr;
	char		*devstr;

	ASSERT(DEVI_BUSY_OWNED(pdip));

	devstr = i_ddi_strdup(devname, KM_SLEEP);
	i_ddi_parse_name(devstr, &cname, &caddr, NULL);

	if (cname == NULL || caddr == NULL) {
		kmem_free(devstr, strlen(devname)+1);
		return ((dev_info_t *)NULL);
	}

	child = find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NODE_BY_NODENAME, NULL);
	kmem_free(devstr, strlen(devname)+1);
	return (child);
}
/*
 * Misc. routines called by framework only
 */

/*
 * Clear the DEVI_MADE_CHILDREN/DEVI_ATTACHED_CHILDREN flags
 * if new child spec has been added.
 */
static int
reset_nexus_flags(dev_info_t *dip, void *arg)
{
	struct hwc_spec	*list;
	int		circ;

	if (((DEVI(dip)->devi_flags & DEVI_MADE_CHILDREN) == 0) ||
	    ((list = hwc_get_child_spec(dip, (major_t)(uintptr_t)arg)) == NULL))
		return (DDI_WALK_CONTINUE);

	hwc_free_spec_list(list);

	/* coordinate child state update */
	ndi_devi_enter(dip, &circ);
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~(DEVI_MADE_CHILDREN | DEVI_ATTACHED_CHILDREN);
	mutex_exit(&DEVI(dip)->devi_lock);
	ndi_devi_exit(dip, circ);

	return (DDI_WALK_CONTINUE);
}
/*
 * Helper functions, returns NULL if no memory.
 */

/*
 * Return an alternate driver name binding for the leaf device
 * of the given pathname, if there is one. The purpose of this
 * function is to deal with generic pathnames. The default action
 * for platforms that can't do this (ie: x86 or any platform that
 * does not have prom_finddevice functionality, which matches
 * nodenames and unit-addresses without the drivers participation)
 * is to return DDI_MAJOR_T_NONE.
 *
 * Used in loadrootmodules() in the swapgeneric module to
 * associate a given pathname with a given leaf driver.
 */
major_t
path_to_major(char *path)
{
	dev_info_t *dip;
	char *p, *q;
	pnode_t nodeid;
	major_t major;

	/* check for path-oriented alias */
	major = ddi_name_to_major(path);
	if (driver_active(major)) {
		NDI_CONFIG_DEBUG((CE_NOTE, "path_to_major: %s path bound %s\n",
		    path, ddi_major_to_name(major)));
		return (major);
	}

	/*
	 * Get the nodeid of the given pathname, if such a mapping exists.
	 */
	dip = NULL;
	nodeid = prom_finddevice(path);
	if (nodeid != OBP_BADNODE) {
		/*
		 * Find the nodeid in our copy of the device tree and return
		 * whatever name we used to bind this node to a driver.
		 */
		dip = e_ddi_nodeid_to_dip(nodeid);
	}

	if (dip == NULL) {
		NDI_CONFIG_DEBUG((CE_WARN,
		    "path_to_major: can't bind <%s>\n", path));
		return (DDI_MAJOR_T_NONE);
	}

	/*
	 * If we're bound to something other than the nodename,
	 * note that in the message buffer and system log.
	 */
	p = ddi_binding_name(dip);
	q = ddi_node_name(dip);
	if (p && q && (strcmp(p, q) != 0))
		NDI_CONFIG_DEBUG((CE_NOTE, "path_to_major: %s bound to %s\n",
		    path, p));

	major = ddi_name_to_major(p);

	ndi_rele_devi(dip);	/* release e_ddi_nodeid_to_dip hold */

	return (major);
}
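
/*
 * Example (illustrative only): given a generic boot path such as
 * "/pci@0,0/pci1234,5678@1/disk@0,0", path_to_major() first checks for a
 * path-oriented alias, then falls back to prom_finddevice() plus the
 * binding name of the matching node, typically yielding the major number
 * of a leaf driver such as "sd".  The names in this example are made up
 * for illustration.
 */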
/*
 * Return the held dip for the specified major and instance, attempting to do
 * an attach if specified.  Return NULL if the devi can't be found or put in
 * the proper state.  The caller must release the hold via ddi_release_devi if
 * a non-NULL value is returned.
 *
 * Some callers expect to be able to perform a hold_devi() while in a context
 * where using ndi_devi_enter() to ensure the hold might cause deadlock (see
 * open-from-attach code in consconfig_dacf.c).  Such special-case callers
 * must ensure that an ndi_devi_enter(parent)/ndi_hold_devi() from a safe
 * context is already active.  The hold_devi() implementation must accommodate
 * these callers.
 */
static dev_info_t *
hold_devi(major_t major, int instance, int flags)
{
	struct devnames	*dnp;
	dev_info_t	*dip;
	char		*path;
	char		*vpath;

	if ((major >= devcnt) || (instance == -1))
		return (NULL);

	/* try to find the instance in the per driver list */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* skip node if instance field is not valid */
		if (i_ddi_node_state(dip) < DS_INITIALIZED)
			continue;

		/* look for instance match */
		if (DEVI(dip)->devi_instance == instance) {
			/*
			 * To accommodate callers that can't block in
			 * ndi_devi_enter() we do an ndi_hold_devi(), and
			 * afterwards check that the node is in a state where
			 * the hold prevents detach().  If we did not manage to
			 * prevent detach then we ndi_rele_devi() and perform
			 * the slow path below (which can result in a blocking
			 * ndi_devi_enter() while driving attach top-down).
			 * This code depends on the ordering of
			 * DEVI_SET_DETACHING and the devi_ref check in the
			 * detach_node() code path.
			 */
			ndi_hold_devi(dip);
			if (i_ddi_devi_attached(dip) &&
			    !DEVI_IS_DETACHING(dip)) {
				UNLOCK_DEV_OPS(&(dnp->dn_lock));
				return (dip);	/* fast-path with devi held */
			}
			ndi_rele_devi(dip);

			/* try the slow path below */
			dip = NULL;
			break;
		}
	}
	ASSERT(dip == NULL);
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	if (flags & E_DDI_HOLD_DEVI_NOATTACH)
		return (NULL);		/* told not to drive attach */

	/* slow-path may block, so it should not occur from interrupt */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (NULL);

	/* reconstruct the path and drive attach by path through devfs. */
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (e_ddi_majorinstance_to_path(major, instance, path) == 0) {
		dip = e_ddi_hold_devi_by_path(path, flags);

		/*
		 * Verify that we got the correct device - a path_to_inst file
		 * with a bogus/corrupt path (or a nexus that changes its
		 * unit-address format) could result in an incorrect answer.
		 *
		 * Verify major, instance, and path.
		 */
		vpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		if (dip &&
		    ((DEVI(dip)->devi_major != major) ||
		    ((DEVI(dip)->devi_instance != instance)) ||
		    (strcmp(path, ddi_pathname(dip, vpath)) != 0))) {
			ndi_rele_devi(dip);
			dip = NULL;	/* no answer better than wrong answer */
		}
		kmem_free(vpath, MAXPATHLEN);
	}
	kmem_free(path, MAXPATHLEN);
	return (dip);			/* with devi held */
}

/*
 * The {e_}ddi_hold_devi{_by_{instance|dev|path}} hold the devinfo node
 * associated with the specified arguments.  This hold should be released
 * by calling ddi_release_devi.
 *
 * The E_DDI_HOLD_DEVI_NOATTACH flag argument allows the caller to specify
 * a failure return if the node is not already attached.
 *
 * NOTE: by the time we make e_ddi_hold_devi public, we should be able to reuse
 * ddi_hold_devi again.
 */
dev_info_t *
ddi_hold_devi_by_instance(major_t major, int instance, int flags)
{
	return (hold_devi(major, instance, flags));
}
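/*
 * Illustrative sketch (not part of this file): a typical caller holds a
 * devinfo node by (major, instance) and releases it when done.  The driver
 * name "foo" and instance 0 are hypothetical.
 *
 *	dev_info_t *dip;
 *
 *	dip = ddi_hold_devi_by_instance(ddi_name_to_major("foo"), 0, 0);
 *	if (dip != NULL) {
 *		(use dip; the hold prevents detach)
 *		ddi_release_devi(dip);
 *	}
 */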
dev_info_t *
e_ddi_hold_devi_by_dev(dev_t dev, int flags)
{
	major_t		major = getmajor(dev);
	dev_info_t	*dip;
	struct dev_ops	*ops;
	dev_info_t	*ddip = NULL;

	dip = hold_devi(major, dev_to_instance(dev), flags);

	/*
	 * The rest of this routine is legacy support for drivers that
	 * have broken DDI_INFO_DEVT2INSTANCE implementations but may have
	 * functional DDI_INFO_DEVT2DEVINFO implementations.  This code will
	 * diagnose inconsistency and, for maximum compatibility with legacy
	 * drivers, give preference to the driver's DDI_INFO_DEVT2DEVINFO
	 * implementation over the above derived dip based on the driver's
	 * DDI_INFO_DEVT2INSTANCE implementation.  This legacy support should
	 * be removed when DDI_INFO_DEVT2DEVINFO is deprecated.
	 *
	 * NOTE: The following code has a race condition.  DEVT2DEVINFO
	 *	returns a dip which is not held.  By the time we ref ddip,
	 *	it could have been freed.  The saving grace is that for
	 *	most drivers, the dip returned from hold_devi() is the
	 *	same one as the one returned by DEVT2DEVINFO, so we are
	 *	safe for drivers with the correct getinfo(9e) impl.
	 */
	if (((ops = ddi_hold_driver(major)) != NULL) &&
	    CB_DRV_INSTALLED(ops) && ops->devo_getinfo) {
		if ((*ops->devo_getinfo)(NULL, DDI_INFO_DEVT2DEVINFO,
		    (void *)dev, (void **)&ddip) != DDI_SUCCESS)
			ddip = NULL;
	}

	/* give preference to the driver returned DEVT2DEVINFO dip */
	if (ddip && (dip != ddip)) {
		cmn_err(CE_WARN, "%s: inconsistent getinfo(9E) implementation",
		    ddi_driver_name(ddip));
		ndi_hold_devi(ddip);
		if (dip)
			ndi_rele_devi(dip);
		dip = ddip;
	}

	if (ops)
		ddi_rele_driver(major);

	return (dip);	/* with devi held */
}

/*
 * For compatibility only.  Do not call this function!
 */
dev_info_t *
e_ddi_get_dev_info(dev_t dev, vtype_t type)
{
	dev_info_t	*dip = NULL;

	if (getmajor(dev) >= devcnt)
		return (NULL);

	switch (type) {
	case VCHR:
	case VBLK:
		dip = e_ddi_hold_devi_by_dev(dev, 0);
		break;
	default:
		break;
	}

	/*
	 * For compatibility reasons, we can only return the dip with
	 * the driver ref count held.  This is not a safe thing to do.
	 * For certain broken third-party software, we are willing
	 * to venture into unknown territory.
	 */
	if (dip) {
		(void) ndi_hold_driver(dip);
		ndi_rele_devi(dip);
	}
	return (dip);
}
dev_info_t *
e_ddi_hold_devi_by_path(char *path, int flags)
{
	dev_info_t	*dip;

	/* can't specify NOATTACH by path */
	ASSERT(!(flags & E_DDI_HOLD_DEVI_NOATTACH));

	return (resolve_pathname(path, &dip, NULL, NULL) ? NULL : dip);
}

void
e_ddi_hold_devi(dev_info_t *dip)
{
	ndi_hold_devi(dip);
}

void
ddi_release_devi(dev_info_t *dip)
{
	ndi_rele_devi(dip);
}

/*
 * Associate a streams queue with a devinfo node
 * NOTE: This function is called by STREAM driver's put procedure.
 */
void
ddi_assoc_queue_with_devi(queue_t *q, dev_info_t *dip)
{
	queue_t		*rq = _RD(q);
	struct stdata	*stp;
	vnode_t		*vp;

	/* set flag indicating that ddi_assoc_queue_with_devi was called */
	mutex_enter(QLOCK(rq));
	rq->q_flag |= _QASSOCIATED;
	mutex_exit(QLOCK(rq));

	/* get the vnode associated with the queue */
	stp = STREAM(rq);
	vp = stp->sd_vnode;
	ASSERT(vp);

	/* change the hardware association of the vnode */
	spec_assoc_vp_with_devi(vp, dip);
}

/*
 * ddi_install_driver(name)
 *
 * Driver installation is currently a byproduct of driver loading.  This
 * may change.
 */
int
ddi_install_driver(char *name)
{
	major_t major = ddi_name_to_major(name);

	if ((major == DDI_MAJOR_T_NONE) ||
	    (ddi_hold_installed_driver(major) == NULL)) {
		return (DDI_FAILURE);
	}
	ddi_rele_driver(major);
	return (DDI_SUCCESS);
}

struct dev_ops *
ddi_hold_driver(major_t major)
{
	return (mod_hold_dev_by_major(major));
}

void
ddi_rele_driver(major_t major)
{
	mod_rele_dev_by_major(major);
}
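/*
 * Illustrative sketch (not part of this file): ddi_hold_driver() and
 * ddi_rele_driver() bracket direct use of a driver's dev_ops, as
 * e_ddi_hold_devi_by_dev() does above.  The major value is assumed valid
 * and is held only for the duration of the inspection.
 *
 *	struct dev_ops *ops;
 *
 *	if ((ops = ddi_hold_driver(major)) != NULL) {
 *		(inspect ops, e.g. CB_DRV_INSTALLED(ops), ops->devo_getinfo)
 *		ddi_rele_driver(major);
 *	}
 */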
/*
 * This is called during boot to force attachment order of special dips.
 * dip must be referenced via ndi_hold_devi().
 */
int
i_ddi_attach_node_hierarchy(dev_info_t *dip)
{
	dev_info_t	*parent;
	int		circ, ret;

	/*
	 * Recurse up until attached parent is found.
	 */
	if (i_ddi_devi_attached(dip))
		return (DDI_SUCCESS);
	parent = ddi_get_parent(dip);
	if (i_ddi_attach_node_hierarchy(parent) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Come top-down, expanding .conf nodes under this parent
	 * and driving attach.
	 */
	ndi_devi_enter(parent, &circ);
	(void) i_ndi_make_spec_children(parent, 0);
	ret = i_ddi_attachchild(dip);
	ndi_devi_exit(parent, circ);

	return (ret);
}

/* keep this function static */
static int
attach_driver_nodes(major_t major)
{
	struct devnames	*dnp;
	dev_info_t	*dip;
	int		error = DDI_FAILURE;

	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = dnp->dn_head;
	while (dip) {
		ndi_hold_devi(dip);
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		if (i_ddi_attach_node_hierarchy(dip) == DDI_SUCCESS)
			error = DDI_SUCCESS;
		/*
		 * Set the 'ddi-config-driver-node' property on a nexus
		 * node to cause attach_driver_nodes() to configure all
		 * immediate children of the nexus.  This property should
		 * be set on nodes with immediate children that bind to
		 * the same driver as parent.
		 */
		if ((error == DDI_SUCCESS) && (ddi_prop_exists(DDI_DEV_T_ANY,
		    dip, DDI_PROP_DONTPASS, "ddi-config-driver-node"))) {
			(void) ndi_devi_config(dip, NDI_NO_EVENT);
		}
		LOCK_DEV_OPS(&dnp->dn_lock);
		ndi_rele_devi(dip);
		dip = ddi_get_next(dip);
	}
	if (error == DDI_SUCCESS)
		dnp->dn_flags |= DN_NO_AUTODETACH;
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	return (error);
}
/*
 * i_ddi_attach_hw_nodes configures and attaches all hw nodes
 * bound to a specific driver.  This function replaces calls to
 * ddi_hold_installed_driver() for drivers with no .conf
 * enumerated nodes.
 *
 * This facility is typically called at boot time to attach
 * platform-specific hardware nodes, such as ppm nodes on xcal
 * and grover and keyswitch nodes on cherrystone.  It does not
 * deal with .conf enumerated nodes.  Calling it beyond the boot
 * process is strongly discouraged.
 */
int
i_ddi_attach_hw_nodes(char *driver)
{
	major_t	major;

	major = ddi_name_to_major(driver);
	if (major == DDI_MAJOR_T_NONE)
		return (DDI_FAILURE);

	return (attach_driver_nodes(major));
}
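/*
 * Illustrative sketch (not part of this file): platform startup code is the
 * intended caller, attaching every h/w node bound to one driver by name.
 * The driver name "ppm" follows the comment above; the error handling shown
 * is hypothetical.
 *
 *	if (i_ddi_attach_hw_nodes("ppm") != DDI_SUCCESS)
 *		cmn_err(CE_WARN, "ppm nodes did not attach");
 */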
/*
 * i_ddi_attach_pseudo_node configures pseudo drivers which
 * have a single node.  The .conf nodes must be enumerated
 * before calling this interface.  The dip is held attached
 * upon returning.
 *
 * This facility should only be called at boot time
 * by the I/O framework.
 */
dev_info_t *
i_ddi_attach_pseudo_node(char *driver)
{
	major_t		major;
	dev_info_t	*dip;

	major = ddi_name_to_major(driver);
	if (major == DDI_MAJOR_T_NONE)
		return (NULL);

	if (attach_driver_nodes(major) != DDI_SUCCESS)
		return (NULL);

	dip = devnamesp[major].dn_head;
	ASSERT(dip && ddi_get_next(dip) == NULL);
	ndi_hold_devi(dip);
	return (dip);
}

static void
diplist_to_parent_major(dev_info_t *head, char parents[])
{
	major_t		major;
	dev_info_t	*dip, *pdip;

	for (dip = head; dip != NULL; dip = ddi_get_next(dip)) {
		pdip = ddi_get_parent(dip);
		ASSERT(pdip);	/* disallow rootnex.conf nodes */
		major = ddi_driver_major(pdip);
		if ((major != DDI_MAJOR_T_NONE) && parents[major] == 0)
			parents[major] = 1;
	}
}
/*
 * Call ddi_hold_installed_driver() on each parent major
 * and invoke mt_config_driver() to attach child major.
 * This is part of the implementation of ddi_hold_installed_driver.
 */
static int
attach_driver_by_parent(major_t child_major, char parents[])
{
	major_t			par_major;
	struct mt_config_handle	*hdl;
	int			flags = NDI_DEVI_PERSIST | NDI_NO_EVENT;

	hdl = mt_config_init(NULL, NULL, flags, child_major, MT_CONFIG_OP,
	    NULL);
	for (par_major = 0; par_major < devcnt; par_major++) {
		/* disallow recursion on the same driver */
		if (parents[par_major] == 0 || par_major == child_major)
			continue;

		if (ddi_hold_installed_driver(par_major) == NULL)
			continue;
		hdl->mtc_parmajor = par_major;
		mt_config_driver(hdl);
		ddi_rele_driver(par_major);
	}
	(void) mt_config_fini(hdl);

	return (i_ddi_devs_attached(child_major));
}

int
i_ddi_devs_attached(major_t major)
{
	dev_info_t	*dip;
	struct devnames	*dnp;
	int		error = DDI_FAILURE;

	/* check for attached instances */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	for (dip = dnp->dn_head; dip != NULL; dip = ddi_get_next(dip)) {
		if (i_ddi_devi_attached(dip)) {
			error = DDI_SUCCESS;
			break;
		}
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	return (error);
}

int
i_ddi_minor_node_count(dev_info_t *ddip, const char *node_type)
{
	int			count = 0;
	struct ddi_minor_data	*dp;
	int			circ;

	ndi_devi_enter(ddip, &circ);
	for (dp = DEVI(ddip)->devi_minor; dp != NULL; dp = dp->next) {
		if (strcmp(dp->ddm_node_type, node_type) == 0)
			count++;
	}
	ndi_devi_exit(ddip, circ);
	return (count);
}
/*
 * ddi_hold_installed_driver configures and attaches all
 * instances of the specified driver.  To accomplish this
 * it configures and attaches all possible parents of
 * the driver, enumerated both in h/w nodes and in the
 * driver's .conf file.
 *
 * NOTE: This facility is for compatibility purposes only and will
 *	eventually go away.  Its usage is strongly discouraged.
 */
static void
enter_driver(struct devnames *dnp)
{
	mutex_enter(&dnp->dn_lock);
	ASSERT(dnp->dn_busy_thread != curthread);
	while (dnp->dn_flags & DN_DRIVER_BUSY)
		cv_wait(&dnp->dn_wait, &dnp->dn_lock);
	dnp->dn_flags |= DN_DRIVER_BUSY;
	dnp->dn_busy_thread = curthread;
	mutex_exit(&dnp->dn_lock);
}

static void
exit_driver(struct devnames *dnp)
{
	mutex_enter(&dnp->dn_lock);
	ASSERT(dnp->dn_busy_thread == curthread);
	dnp->dn_flags &= ~DN_DRIVER_BUSY;
	dnp->dn_busy_thread = NULL;
	cv_broadcast(&dnp->dn_wait);
	mutex_exit(&dnp->dn_lock);
}
struct dev_ops *
ddi_hold_installed_driver(major_t major)
{
	struct dev_ops	*ops;
	struct devnames	*dnp;
	char		*parents;
	int		error;

	ops = ddi_hold_driver(major);
	if (ops == NULL)
		return (NULL);

	/*
	 * Return immediately if all the attach operations associated
	 * with a ddi_hold_installed_driver() call have already been done.
	 */
	dnp = &devnamesp[major];
	enter_driver(dnp);
	ASSERT(driver_active(major));

	if (dnp->dn_flags & DN_DRIVER_HELD) {
		exit_driver(dnp);
		if (i_ddi_devs_attached(major) == DDI_SUCCESS)
			return (ops);
		ddi_rele_driver(major);
		return (NULL);
	}

	LOCK_DEV_OPS(&dnp->dn_lock);
	dnp->dn_flags |= (DN_DRIVER_HELD | DN_NO_AUTODETACH);
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	DCOMPATPRINTF((CE_CONT,
	    "ddi_hold_installed_driver: %s\n", dnp->dn_name));

	/*
	 * When the driver has no .conf children, it is sufficient
	 * to attach existing nodes in the device tree.  Nodes not
	 * enumerated by the OBP are not attached.
	 */
	if (dnp->dn_pl == NULL) {
		if (attach_driver_nodes(major) == DDI_SUCCESS) {
			exit_driver(dnp);
			return (ops);
		} else {
			exit_driver(dnp);
			ddi_rele_driver(major);
			return (NULL);
		}
	}

	/*
	 * The driver has .conf nodes.  We find all possible parents
	 * and recursively call ddi_hold_installed_driver on the
	 * parent driver; then we invoke mt_config_driver()
	 * on all possible parent nodes in parallel to speed up
	 * performance.
	 */
	parents = kmem_zalloc(devcnt * sizeof (char), KM_SLEEP);

	LOCK_DEV_OPS(&dnp->dn_lock);
	/* find .conf parents */
	(void) impl_parlist_to_major(dnp->dn_pl, parents);
	/* find hw node parents */
	diplist_to_parent_major(dnp->dn_head, parents);
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	error = attach_driver_by_parent(major, parents);
	kmem_free(parents, devcnt * sizeof (char));
	if (error == DDI_SUCCESS) {
		exit_driver(dnp);
		return (ops);
	}

	exit_driver(dnp);
	ddi_rele_driver(major);
	return (NULL);
}
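/*
 * Illustrative sketch (not part of this file): the compatibility pattern the
 * comment above warns about.  A legacy consumer forces every instance of a
 * driver to attach, uses the instances, then drops the hold.  The driver
 * name "foo" is hypothetical.
 *
 *	major_t major = ddi_name_to_major("foo");
 *	struct dev_ops *ops;
 *
 *	if (major != DDI_MAJOR_T_NONE &&
 *	    (ops = ddi_hold_installed_driver(major)) != NULL) {
 *		(all attachable instances of "foo" are now attached)
 *		ddi_rele_driver(major);
 *	}
 */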
/*
 * Default bus_config entry point for nexus drivers
 */
int
ndi_busop_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child, clock_t timeout)
{
	major_t major;

	/*
	 * A timeout of 30 minutes or more is probably a mistake.
	 * This is intended to catch uses where timeout is in
	 * the wrong units.  timeout must be in units of ticks.
	 */
	ASSERT(timeout < SEC_TO_TICK(1800));

	major = DDI_MAJOR_T_NONE;
	switch (op) {
	case BUS_CONFIG_ONE:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus config %s timeout=%ld\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    (char *)arg, timeout));
		return (devi_config_one(pdip, (char *)arg, child, flags,
		    timeout));

	case BUS_CONFIG_DRIVER:
		major = (major_t)(uintptr_t)arg;
		/*FALLTHROUGH*/
	case BUS_CONFIG_ALL:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus config timeout=%ld\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    timeout));
		if (timeout > 0) {
			NDI_DEBUG(flags, (CE_CONT,
			    "%s%d: bus config all timeout=%ld\n",
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    timeout));
			delay(timeout);
		}
		return (config_immediate_children(pdip, flags, major));

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
/*
 * Default busop bus_unconfig handler for nexus drivers
 */
int
ndi_busop_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	major_t major;

	major = DDI_MAJOR_T_NONE;
	switch (op) {
	case BUS_UNCONFIG_ONE:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus unconfig %s\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    (char *)arg));
		return (devi_unconfig_one(pdip, (char *)arg, flags));

	case BUS_UNCONFIG_DRIVER:
		major = (major_t)(uintptr_t)arg;
		/*FALLTHROUGH*/
	case BUS_UNCONFIG_ALL:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus unconfig all\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip)));
		return (unconfig_immediate_children(pdip, NULL, flags, major));

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
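/*
 * Illustrative sketch (not part of this file): a nexus driver with no
 * special configuration needs can point its bus_ops entries at the default
 * handlers above.  The xxnex_* wrapper names are hypothetical.
 *
 *	static int
 *	xxnex_bus_config(dev_info_t *pdip, uint_t flags,
 *	    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
 *	{
 *		return (ndi_busop_bus_config(pdip, flags, op, arg,
 *		    childp, 0));
 *	}
 *
 *	static int
 *	xxnex_bus_unconfig(dev_info_t *pdip, uint_t flags,
 *	    ddi_bus_config_op_t op, void *arg)
 *	{
 *		return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
 *	}
 */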
/*
 * dummy functions to be removed
 */
void
impl_rem_dev_props(dev_info_t *dip)
{
	_NOTE(ARGUNUSED(dip))
	/* do nothing */
}

/*
 * Determine if a node is a leaf node.  If not sure, return false (0).
 */
static int
is_leaf_node(dev_info_t *dip)
{
	major_t major = ddi_driver_major(dip);

	if (major == DDI_MAJOR_T_NONE)
		return (0);

	return (devnamesp[major].dn_flags & DN_LEAF_DRIVER);
}
/*
 * Multithreaded [un]configuration
 */
static struct mt_config_handle *
mt_config_init(dev_info_t *pdip, dev_info_t **dipp, int flags,
    major_t major, int op, struct brevq_node **brevqp)
{
	struct mt_config_handle	*hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);

	mutex_init(&hdl->mtc_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&hdl->mtc_cv, NULL, CV_DEFAULT, NULL);
	hdl->mtc_pdip = pdip;
	hdl->mtc_fdip = dipp;
	hdl->mtc_parmajor = DDI_MAJOR_T_NONE;
	hdl->mtc_flags = flags;
	hdl->mtc_major = major;
	hdl->mtc_thr_count = 0;
	hdl->mtc_op = op;
	hdl->mtc_error = 0;
	hdl->mtc_brevqp = brevqp;

	gethrestime(&hdl->start_time);
	hdl->total_time = 0;

	return (hdl);
}
static int
time_diff_in_msec(timestruc_t start, timestruc_t end)
{
	int	nsec, sec;

	sec = end.tv_sec - start.tv_sec;
	nsec = end.tv_nsec - start.tv_nsec;
	if (nsec < 0) {
		nsec += NANOSEC;
		sec -= 1;
	}

	return (sec * (NANOSEC >> 20) + (nsec >> 20));
}
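/*
 * Worked example of the arithmetic above (not part of this file): the
 * conversion uses shifts instead of exact division, so each elapsed second
 * contributes NANOSEC >> 20 = 1000000000 / 1048576 = 953 "msec" and the
 * nanosecond remainder is divided by 1048576 rather than 1000000.  The
 * result therefore runs roughly 5% low, which is acceptable for the
 * DDI_MTCONFIG timing reports that consume it.
 */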
static int
mt_config_fini(struct mt_config_handle *hdl)
{
	int		rv;
	int		real_time;
	timestruc_t	end_time;

	mutex_enter(&hdl->mtc_lock);
	while (hdl->mtc_thr_count > 0)
		cv_wait(&hdl->mtc_cv, &hdl->mtc_lock);
	rv = hdl->mtc_error;
	mutex_exit(&hdl->mtc_lock);

	gethrestime(&end_time);
	real_time = time_diff_in_msec(hdl->start_time, end_time);
	if ((ddidebug & DDI_MTCONFIG) && hdl->mtc_pdip)
		cmn_err(CE_NOTE,
		    "config %s%d: total time %d msec, real time %d msec",
		    ddi_driver_name(hdl->mtc_pdip),
		    ddi_get_instance(hdl->mtc_pdip),
		    hdl->total_time, real_time);

	cv_destroy(&hdl->mtc_cv);
	mutex_destroy(&hdl->mtc_lock);
	kmem_free(hdl, sizeof (*hdl));

	return (rv);
}
struct mt_config_data {
	struct mt_config_handle	*mtc_hdl;
	dev_info_t		*mtc_dip;
	major_t			mtc_major;
	int			mtc_flags;
	struct brevq_node	*mtc_brn;
	struct mt_config_data	*mtc_next;
};
static void
mt_config_thread(void *arg)
{
	struct mt_config_data	*mcd = (struct mt_config_data *)arg;
	struct mt_config_handle	*hdl = mcd->mtc_hdl;
	dev_info_t		*dip = mcd->mtc_dip;
	dev_info_t		*rdip, **dipp;
	major_t			major = mcd->mtc_major;
	int			flags = mcd->mtc_flags;
	int			rv = 0;
	timestruc_t		start_time, end_time;

	gethrestime(&start_time);

	rdip = NULL;
	dipp = hdl->mtc_fdip ? &rdip : NULL;

	switch (hdl->mtc_op) {
	case MT_CONFIG_OP:
		rv = devi_config_common(dip, flags, major);
		break;
	case MT_UNCONFIG_OP:
		if (mcd->mtc_brn) {
			struct brevq_node *brevq = NULL;
			rv = devi_unconfig_common(dip, dipp, flags, major,
			    &brevq);
			mcd->mtc_brn->brn_child = brevq;
		} else
			rv = devi_unconfig_common(dip, dipp, flags, major,
			    NULL);
		break;
	}

	mutex_enter(&hdl->mtc_lock);

	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(start_time, end_time);

	if ((rv != NDI_SUCCESS) && (hdl->mtc_error == 0)) {
		hdl->mtc_error = rv;
		if ((ddidebug & DDI_DEBUG) && (major != DDI_MAJOR_T_NONE)) {
			char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			(void) ddi_pathname(dip, path);
			cmn_err(CE_NOTE, "mt_config_thread: "
			    "op %d.%d.%x at %s failed %d",
			    hdl->mtc_op, major, flags, path, rv);
			kmem_free(path, MAXPATHLEN);
		}
	}

	if (hdl->mtc_fdip && *hdl->mtc_fdip == NULL) {
		*hdl->mtc_fdip = rdip;
		rdip = NULL;
	}

	if (rdip) {
		ASSERT(rv != NDI_SUCCESS);
		ndi_rele_devi(rdip);
	}

	ndi_rele_devi(dip);

	if (--hdl->mtc_thr_count == 0)
		cv_broadcast(&hdl->mtc_cv);
	mutex_exit(&hdl->mtc_lock);
	kmem_free(mcd, sizeof (*mcd));
}
/*
 * Multi-threaded config/unconfig of child nexus
 */
static void
mt_config_children(struct mt_config_handle *hdl)
{
	dev_info_t		*pdip = hdl->mtc_pdip;
	major_t			major = hdl->mtc_major;
	dev_info_t		*dip;
	int			circ;
	struct brevq_node	*brn;
	struct mt_config_data	*mcd_head = NULL;
	struct mt_config_data	*mcd_tail = NULL;
	struct mt_config_data	*mcd;
	timestruc_t		end_time;

	/* Update total_time in handle */
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(hdl->start_time, end_time);

	ndi_devi_enter(pdip, &circ);
	dip = ddi_get_child(pdip);
	while (dip) {
		if (hdl->mtc_op == MT_UNCONFIG_OP && hdl->mtc_brevqp &&
		    !(DEVI_EVREMOVE(dip)) &&
		    i_ddi_node_state(dip) >= DS_INITIALIZED) {
			/*
			 * Enqueue this dip's deviname.
			 * No need to hold a lock while enqueuing since this
			 * is the only thread doing the enqueue and no one
			 * walks the queue while we are in multithreaded
			 * unconfiguration.
			 */
			brn = brevq_enqueue(hdl->mtc_brevqp, dip, NULL);
		} else
			brn = NULL;

		/*
		 * Hold the child that we are processing so it does not get
		 * removed.  The corresponding ndi_rele_devi() for children
		 * that are not being skipped is done at the end of
		 * mt_config_thread().
		 */
		ndi_hold_devi(dip);

		/*
		 * skip leaf nodes and (for configure) nodes not
		 * fully attached.
		 */
		if (is_leaf_node(dip) ||
		    (hdl->mtc_op == MT_CONFIG_OP &&
		    i_ddi_node_state(dip) < DS_READY)) {
			ndi_rele_devi(dip);
			dip = ddi_get_next_sibling(dip);
			continue;
		}

		mcd = kmem_alloc(sizeof (*mcd), KM_SLEEP);
		mcd->mtc_dip = dip;
		mcd->mtc_hdl = hdl;
		mcd->mtc_brn = brn;

		/*
		 * Switch a 'driver' operation to an 'all' operation below a
		 * node bound to the driver.
		 */
		if ((major == DDI_MAJOR_T_NONE) ||
		    (major == ddi_driver_major(dip)))
			mcd->mtc_major = DDI_MAJOR_T_NONE;
		else
			mcd->mtc_major = major;

		/*
		 * The unconfig-driver to unconfig-all conversion above
		 * constitutes an autodetach for NDI_DETACH_DRIVER calls,
		 * set NDI_AUTODETACH.
		 */
		mcd->mtc_flags = hdl->mtc_flags;
		if ((mcd->mtc_flags & NDI_DETACH_DRIVER) &&
		    (hdl->mtc_op == MT_UNCONFIG_OP) &&
		    (major == ddi_driver_major(pdip)))
			mcd->mtc_flags |= NDI_AUTODETACH;

		mutex_enter(&hdl->mtc_lock);
		hdl->mtc_thr_count++;
		mutex_exit(&hdl->mtc_lock);

		/*
		 * Add to end of list to process after ndi_devi_exit to avoid
		 * locking differences depending on value of mtc_off.
		 */
		mcd->mtc_next = NULL;
		if (mcd_head == NULL)
			mcd_head = mcd;
		else
			mcd_tail->mtc_next = mcd;
		mcd_tail = mcd;

		dip = ddi_get_next_sibling(dip);
	}
	ndi_devi_exit(pdip, circ);

	/* go through the list of held children */
	for (mcd = mcd_head; mcd; mcd = mcd_head) {
		mcd_head = mcd->mtc_next;
		if (mtc_off || (mcd->mtc_flags & NDI_MTC_OFF))
			mt_config_thread(mcd);
		else
			(void) thread_create(NULL, 0, mt_config_thread, mcd,
			    0, &p0, TS_RUN, minclsyspri);
	}
}
static void
mt_config_driver(struct mt_config_handle *hdl)
{
	major_t			par_major = hdl->mtc_parmajor;
	major_t			major = hdl->mtc_major;
	struct devnames		*dnp = &devnamesp[par_major];
	dev_info_t		*dip;
	struct mt_config_data	*mcd_head = NULL;
	struct mt_config_data	*mcd_tail = NULL;
	struct mt_config_data	*mcd;
	timestruc_t		end_time;

	/* Update total_time in handle */
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(hdl->start_time, end_time);

	ASSERT(par_major != DDI_MAJOR_T_NONE);
	ASSERT(major != DDI_MAJOR_T_NONE);

	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = devnamesp[par_major].dn_head;
	while (dip) {
		/*
		 * Hold the child that we are processing so it does not get
		 * removed.  The corresponding ndi_rele_devi() for children
		 * that are not being skipped is done at the end of
		 * mt_config_thread().
		 */
		ndi_hold_devi(dip);

		/* skip leaf nodes and nodes not fully attached */
		if (!i_ddi_devi_attached(dip) || is_leaf_node(dip)) {
			ndi_rele_devi(dip);
			dip = ddi_get_next(dip);
			continue;
		}

		mcd = kmem_alloc(sizeof (*mcd), KM_SLEEP);
		mcd->mtc_dip = dip;
		mcd->mtc_hdl = hdl;
		mcd->mtc_brn = NULL;
		mcd->mtc_major = major;
		mcd->mtc_flags = hdl->mtc_flags;

		mutex_enter(&hdl->mtc_lock);
		hdl->mtc_thr_count++;
		mutex_exit(&hdl->mtc_lock);

		/*
		 * Add to end of list to process after UNLOCK_DEV_OPS to avoid
		 * locking differences depending on value of mtc_off.
		 */
		mcd->mtc_next = NULL;
		if (mcd_head == NULL)
			mcd_head = mcd;
		else
			mcd_tail->mtc_next = mcd;
		mcd_tail = mcd;

		dip = ddi_get_next(dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* go through the list of held children */
	for (mcd = mcd_head; mcd; mcd = mcd_head) {
		mcd_head = mcd->mtc_next;
		if (mtc_off || (mcd->mtc_flags & NDI_MTC_OFF))
			mt_config_thread(mcd);
		else
			(void) thread_create(NULL, 0, mt_config_thread, mcd,
			    0, &p0, TS_RUN, minclsyspri);
	}
}
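/*
 * Illustrative sketch (not part of this file): the handle lifecycle that
 * mt_config_children()/mt_config_driver() expect from their callers, as
 * used by the devi_config/devi_unconfig paths.  The flag values shown are
 * hypothetical.
 *
 *	struct mt_config_handle *hdl;
 *	int rv;
 *
 *	hdl = mt_config_init(pdip, NULL, NDI_NO_EVENT, DDI_MAJOR_T_NONE,
 *	    MT_CONFIG_OP, NULL);
 *	mt_config_children(hdl);
 *	rv = mt_config_fini(hdl);
 *
 * mt_config_children() spawns one mt_config_thread() per eligible child and
 * mt_config_fini() waits for them, returning the first recorded error.
 */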
/*
 * Given the nodeid for a persistent (PROM or SID) node, return
 * the corresponding devinfo node.
 * NOTE: This function will return NULL for .conf nodeids.
 */
dev_info_t *
e_ddi_nodeid_to_dip(pnode_t nodeid)
{
	dev_info_t		*dip = NULL;
	struct devi_nodeid	*prev, *elem;

	mutex_enter(&devimap->dno_lock);

	prev = NULL;
	for (elem = devimap->dno_head; elem; elem = elem->next) {
		if (elem->nodeid == nodeid) {
			ndi_hold_devi(elem->dip);
			dip = elem->dip;
			break;
		}
		prev = elem;
	}

	/*
	 * Move to head for faster lookup next time
	 */
	if (elem && prev) {
		prev->next = elem->next;
		elem->next = devimap->dno_head;
		devimap->dno_head = elem;
	}

	mutex_exit(&devimap->dno_lock);
	return (dip);
}
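/*
 * Illustrative sketch (not part of this file): translating a PROM nodeid
 * back to a held devinfo node.  The caller owns the hold and must drop it
 * with ndi_rele_devi() when done.
 *
 *	dev_info_t *dip;
 *
 *	if ((dip = e_ddi_nodeid_to_dip(nodeid)) != NULL) {
 *		(use dip)
 *		ndi_rele_devi(dip);
 *	}
 */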
static void
free_cache_task(void *arg)
{
	ASSERT(arg == NULL);

	mutex_enter(&di_cache.cache_lock);

	/*
	 * The cache can be invalidated without holding the lock
	 * but it can be made valid again only while the lock is held.
	 * So if the cache is invalid when the lock is held, it will
	 * stay invalid until lock is released.
	 */
	if (!di_cache.cache_valid)
		i_ddi_di_cache_free(&di_cache);

	mutex_exit(&di_cache.cache_lock);

	if (di_cache_debug)
		cmn_err(CE_NOTE, "system_taskq: di_cache freed");
}
extern int modrootloaded;

void
i_ddi_di_cache_free(struct di_cache *cache)
{
	int	error;
	extern int sys_shutdown;

	ASSERT(mutex_owned(&cache->cache_lock));

	if (cache->cache_size) {
		ASSERT(cache->cache_size > 0);
		ASSERT(cache->cache_data);

		kmem_free(cache->cache_data, cache->cache_size);
		cache->cache_data = NULL;
		cache->cache_size = 0;

		if (di_cache_debug)
			cmn_err(CE_NOTE, "i_ddi_di_cache_free: freed cachemem");
	} else {
		ASSERT(cache->cache_data == NULL);
		if (di_cache_debug)
			cmn_err(CE_NOTE, "i_ddi_di_cache_free: NULL cache");
	}

	if (!modrootloaded || rootvp == NULL ||
	    vn_is_readonly(rootvp) || sys_shutdown) {
		if (di_cache_debug) {
			cmn_err(CE_WARN, "/ not mounted/RDONLY. Skip unlink");
		}
		return;
	}

	error = vn_remove(DI_CACHE_FILE, UIO_SYSSPACE, RMFILE);
	if (di_cache_debug && error && error != ENOENT) {
		cmn_err(CE_WARN, "%s: unlink failed: %d", DI_CACHE_FILE, error);
	} else if (di_cache_debug && !error) {
		cmn_err(CE_NOTE, "i_ddi_di_cache_free: unlinked cache file");
	}
}
void
i_ddi_di_cache_invalidate()
{
	int	cache_valid;

	if (!modrootloaded || !i_ddi_io_initialized()) {
		if (di_cache_debug)
			cmn_err(CE_NOTE, "I/O not inited. Skipping invalidate");
		return;
	}

	/* Increment devtree generation number. */
	atomic_inc_ulong(&devtree_gen);

	/* Invalidate the in-core cache and dispatch free on valid->invalid */
	cache_valid = atomic_swap_uint(&di_cache.cache_valid, 0);
	if (cache_valid) {
		/*
		 * This is an optimization to start cleaning up a cached
		 * snapshot early.  For this reason, it is OK for
		 * taskq_dispatch to fail (and it is OK to not track calling
		 * context relative to sleep, and assume NOSLEEP).
		 */
		(void) taskq_dispatch(system_taskq, free_cache_task, NULL,
		    TQ_NOSLEEP);
	}

	if (di_cache_debug) {
		cmn_err(CE_NOTE, "invalidation");
	}
}
static void
i_bind_vhci_node(dev_info_t *dip)
{
	DEVI(dip)->devi_major = ddi_name_to_major(ddi_node_name(dip));
	i_ddi_set_node_state(dip, DS_BOUND);
}

static char vhci_node_addr[2];

static int
i_init_vhci_node(dev_info_t *dip)
{
	add_global_props(dip);
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL)
		return (-1);

	DEVI(dip)->devi_instance = e_ddi_assign_instance(dip);
	e_ddi_keep_instance(dip);
	vhci_node_addr[0] = '\0';
	ddi_set_name_addr(dip, vhci_node_addr);
	i_ddi_set_node_state(dip, DS_INITIALIZED);

	return (0);
}

static void
i_link_vhci_node(dev_info_t *dip)
{
	ASSERT(MUTEX_HELD(&global_vhci_lock));

	/*
	 * scsi_vhci should be kept left most of the device tree.
	 */
	if (scsi_vhci_dip) {
		DEVI(dip)->devi_sibling = DEVI(scsi_vhci_dip)->devi_sibling;
		DEVI(scsi_vhci_dip)->devi_sibling = DEVI(dip);
	} else {
		DEVI(dip)->devi_sibling = DEVI(top_devinfo)->devi_child;
		DEVI(top_devinfo)->devi_child = DEVI(dip);
	}
}
/*
 * This is a special routine to enumerate the vhci node (child of the rootnex
 * node) without holding the ndi_devi_enter() lock.  The device node
 * is allocated, initialized and brought into DS_READY state before
 * inserting into the device tree.  The VHCI node is handcrafted
 * here to bring the node to DS_READY, similar to the rootnex node.
 *
 * The global_vhci_lock protects linking the node into the device
 * tree, since the same lock is held before linking/unlinking any direct
 * child of the rootnex.
 *
 * This routine is a workaround to handle a possible deadlock
 * that occurs while trying to enumerate a node in a different sub-tree
 * during _init/_attach entry points.
 */
/*ARGSUSED*/
dev_info_t *
ndi_devi_config_vhci(char *drvname, int flags)
{
	struct devnames		*dnp;
	dev_info_t		*dip;
	major_t			major = ddi_name_to_major(drvname);

	if (major == DDI_MAJOR_T_NONE)
		return (NULL);

	/* Make sure we create the VHCI node only once */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	if (dnp->dn_head) {
		dip = dnp->dn_head;
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* Allocate the VHCI node */
	ndi_devi_alloc_sleep(top_devinfo, drvname, DEVI_SID_NODEID, &dip);
	ndi_hold_devi(dip);

	/* Mark the node as VHCI */
	DEVI(dip)->devi_node_attributes |= DDI_VHCI_NODE;

	i_ddi_add_devimap(dip);
	i_bind_vhci_node(dip);
	if (i_init_vhci_node(dip) == -1) {
		ndi_rele_devi(dip);
		(void) ndi_devi_free(dip);
		return (NULL);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_SET_ATTACHING(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	if (devi_attach(dip, DDI_ATTACH) != DDI_SUCCESS) {
		cmn_err(CE_CONT, "Could not attach %s driver", drvname);
		e_ddi_free_instance(dip, vhci_node_addr);
		ndi_rele_devi(dip);
		(void) ndi_devi_free(dip);
		return (NULL);
	}
	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_CLR_ATTACHING(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	mutex_enter(&global_vhci_lock);
	i_link_vhci_node(dip);
	mutex_exit(&global_vhci_lock);
	i_ddi_set_node_state(dip, DS_READY);

	LOCK_DEV_OPS(&dnp->dn_lock);
	dnp->dn_flags |= DN_DRIVER_HELD;
	dnp->dn_head = dip;
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	i_ndi_devi_report_status_change(dip, NULL);

	return (dip);
}
/*
 * Maintain DEVI_DEVICE_REMOVED hotplug devi_state for remove/reinsert hotplug
 * of open devices.  Currently, because of tight coupling between the devfs
 * file system and the Solaris device tree, a driver can't always make the
 * device tree state (esp devi_node_state) match device hardware hotplug
 * state.  Until resolved, to overcome this deficiency we use the following
 * interfaces that maintain the DEVI_DEVICE_REMOVED devi_state status bit.
 * These interfaces report current state, and drive operation (like events
 * and cache invalidation) when a driver changes remove/insert state of an
 * open device.
 *
 * The ndi_devi_device_isremoved() returns 1 if the device is currently
 * removed.
 *
 * The ndi_devi_device_remove() interface declares the device as removed, and
 * returns 1 if there was a state change associated with this declaration.
 *
 * The ndi_devi_device_insert() declares the device as inserted, and returns 1
 * if there was a state change associated with this declaration.
 */
int
ndi_devi_device_isremoved(dev_info_t *dip)
{
	return (DEVI_IS_DEVICE_REMOVED(dip));
}

int
ndi_devi_device_remove(dev_info_t *dip)
{
	ASSERT(dip && ddi_get_parent(dip) &&
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	/* Return if already marked removed. */
	if (ndi_devi_device_isremoved(dip))
		return (0);

	/* Mark the device as having been physically removed. */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ndi_devi_set_hidden(dip);	/* invisible: lookup/snapshot */
	DEVI_SET_DEVICE_REMOVED(dip);
	DEVI_SET_EVREMOVE(dip);		/* this clears EVADD too */
	mutex_exit(&(DEVI(dip)->devi_lock));

	/* report remove (as 'removed') */
	i_ndi_devi_report_status_change(dip, NULL);

	/*
	 * Invalidate the cache to ensure accurate
	 * (di_state() & DI_DEVICE_REMOVED).
	 */
	i_ddi_di_cache_invalidate();

	/*
	 * Generate sysevent for those interested in removal (either
	 * directly via private EC_DEVFS or indirectly via devfsadmd
	 * generated EC_DEV).  This will generate LDI DEVICE_REMOVE
	 * notifications.
	 */
	i_ddi_log_devfs_device_remove(dip);

	return (1);		/* DEVICE_REMOVED state changed */
}

int
ndi_devi_device_insert(dev_info_t *dip)
{
	ASSERT(dip && ddi_get_parent(dip) &&
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	/* Return if not marked removed. */
	if (!ndi_devi_device_isremoved(dip))
		return (0);

	/* Mark the device as having been physically reinserted. */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ndi_devi_clr_hidden(dip);	/* visible: lookup/snapshot */
	DEVI_SET_DEVICE_REINSERTED(dip);
	DEVI_SET_EVADD(dip);		/* this clears EVREMOVE too */
	mutex_exit(&(DEVI(dip)->devi_lock));

	/* report insert (as 'online') */
	i_ndi_devi_report_status_change(dip, NULL);

	/*
	 * Invalidate the cache to ensure accurate
	 * (di_state() & DI_DEVICE_REMOVED).
	 */
	i_ddi_di_cache_invalidate();

	/*
	 * Generate sysevent for those interested in removal (either directly
	 * via EC_DEVFS or indirectly via devfsadmd generated EC_DEV).
	 */
	i_ddi_log_devfs_device_insert(dip);

	return (1);		/* DEVICE_REMOVED state changed */
}
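/*
 * Illustrative sketch (not part of this file): a driver that detects
 * surprise removal of an open device reports it through the interfaces
 * above while holding the parent busy, and reverses the state on reinsert.
 * device_is_gone stands for whatever presence test the hardware provides.
 *
 *	int circ;
 *	dev_info_t *pdip = ddi_get_parent(dip);
 *
 *	ndi_devi_enter(pdip, &circ);
 *	if (device_is_gone)
 *		(void) ndi_devi_device_remove(dip);
 *	else
 *		(void) ndi_devi_device_insert(dip);
 *	ndi_devi_exit(pdip, circ);
 */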
/*
 * ibt_hw_is_present() returns 0 when there is no IB hardware actively
 * running.  This is primarily useful for modules like rpcmod which
 * needs a quick check to decide whether or not it should try to use
 * InfiniBand.
 */
int ib_hw_status = 0;
int
ibt_hw_is_present()
{
	return (ib_hw_status);
}
/*
 * ASSERT that constraint flag is not set and then set the "retire attempt"
 * flag.
 */
static int
e_ddi_mark_retiring(dev_info_t *dip, void *arg)
{
	char	**cons_array = (char **)arg;
	char	*path;
	int	constraint;
	int	i;

	constraint = 0;
	if (cons_array) {
		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(dip, path);
		for (i = 0; cons_array[i] != NULL; i++) {
			if (strcmp(path, cons_array[i]) == 0) {
				constraint = 1;
				break;
			}
		}
		kmem_free(path, MAXPATHLEN);
	}

	mutex_enter(&DEVI(dip)->devi_lock);
	ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT));
	DEVI(dip)->devi_flags |= DEVI_RETIRING;
	if (constraint)
		DEVI(dip)->devi_flags |= DEVI_R_CONSTRAINT;
	mutex_exit(&DEVI(dip)->devi_lock);

	RIO_VERBOSE((CE_NOTE, "marked dip as undergoing retire process dip=%p",
	    (void *)dip));

	if (constraint)
		RIO_DEBUG((CE_NOTE, "marked dip as constrained, dip=%p",
		    (void *)dip));

	if (MDI_PHCI(dip))
		mdi_phci_mark_retiring(dip, cons_array);

	return (DDI_WALK_CONTINUE);
}

static void
free_array(char **cons_array)
{
	int	i;

	if (cons_array == NULL)
		return;

	for (i = 0; cons_array[i] != NULL; i++) {
		kmem_free(cons_array[i], strlen(cons_array[i]) + 1);
	}
	kmem_free(cons_array, (i + 1) * sizeof (char *));
}
/*
 * Walk *every* node in subtree and check if it blocks, allows or has no
 * comment on a proposed retire.
 */
static int
e_ddi_retire_notify(dev_info_t *dip, void *arg)
{
	int	*constraint = (int *)arg;

	RIO_DEBUG((CE_NOTE, "retire notify: dip = %p", (void *)dip));

	(void) e_ddi_offline_notify(dip);

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (!(DEVI(dip)->devi_flags & DEVI_RETIRING)) {
		RIO_DEBUG((CE_WARN, "retire notify: dip in retire "
		    "subtree is not marked: dip = %p", (void *)dip));
		*constraint = 0;
	} else if (DEVI(dip)->devi_flags & DEVI_R_BLOCKED) {
		ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT));
		RIO_DEBUG((CE_NOTE, "retire notify: BLOCKED: dip = %p",
		    (void *)dip));
		*constraint = 0;
	} else if (!(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT)) {
		RIO_DEBUG((CE_NOTE, "retire notify: NO CONSTRAINT: "
		    "dip = %p", (void *)dip));
		*constraint = 0;
	} else {
		RIO_DEBUG((CE_NOTE, "retire notify: CONSTRAINT set: "
		    "dip = %p", (void *)dip));
	}
	mutex_exit(&DEVI(dip)->devi_lock);

	if (MDI_PHCI(dip))
		mdi_phci_retire_notify(dip, constraint);

	return (DDI_WALK_CONTINUE);
}
static int
e_ddi_retire_finalize(dev_info_t *dip, void *arg)
{
	int	constraint = *(int *)arg;
	int	finalize;
	int	phci_only;

	mutex_enter(&DEVI(dip)->devi_lock);
	if (!(DEVI(dip)->devi_flags & DEVI_RETIRING)) {
		RIO_DEBUG((CE_WARN,
		    "retire: unmarked dip(%p) in retire subtree",
		    (void *)dip));
		ASSERT(!(DEVI(dip)->devi_flags & DEVI_RETIRED));
		ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT));
		ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_BLOCKED));
		mutex_exit(&DEVI(dip)->devi_lock);
		return (DDI_WALK_CONTINUE);
	}

	/*
	 * retire the device if constraints have been applied
	 * or if the device is not in use
	 */
	finalize = 0;
	if (constraint) {
		ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

		ASSERT(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT);
		ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_BLOCKED));
		DEVI(dip)->devi_flags &= ~DEVI_R_CONSTRAINT;
		DEVI(dip)->devi_flags &= ~DEVI_RETIRING;
		DEVI(dip)->devi_flags |= DEVI_RETIRED;
		mutex_exit(&DEVI(dip)->devi_lock);
		(void) spec_fence_snode(dip, NULL);
		RIO_DEBUG((CE_NOTE, "Fenced off: dip = %p", (void *)dip));
		e_ddi_offline_finalize(dip, DDI_SUCCESS);
	} else {
		if (DEVI(dip)->devi_flags & DEVI_R_BLOCKED) {
			ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT));
			DEVI(dip)->devi_flags &= ~DEVI_R_BLOCKED;
			DEVI(dip)->devi_flags &= ~DEVI_RETIRING;
			/* we have already finalized during notify */
		} else if (DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT) {
			DEVI(dip)->devi_flags &= ~DEVI_R_CONSTRAINT;
			DEVI(dip)->devi_flags &= ~DEVI_RETIRING;
			finalize = 1;
		} else {
			DEVI(dip)->devi_flags &= ~DEVI_RETIRING;
			finalize = 1;
			/*
			 * even if no contracts, need to call finalize
			 * to clear the contract barrier on the dip
			 */
		}
		mutex_exit(&DEVI(dip)->devi_lock);
		RIO_DEBUG((CE_NOTE, "finalize: NOT retired: dip = %p",
		    (void *)dip));
		if (finalize)
			e_ddi_offline_finalize(dip, DDI_FAILURE);
	}

	/*
	 * phci_only variable indicates no client checking, just
	 * offline the PHCI.  We set that to 0 to enable client
	 * checking.
	 */
	phci_only = 0;
	if (MDI_PHCI(dip))
		mdi_phci_retire_finalize(dip, phci_only, arg);

	return (DDI_WALK_CONTINUE);
}
/*
 * Returns:
 *	DDI_SUCCESS if constraints allow retire
 *	DDI_FAILURE if constraints don't allow retire.
 * cons_array is a NULL terminated array of node paths for
 * which constraints have already been applied.
 */
int
e_ddi_retire_device(char *path, char **cons_array)
{
	dev_info_t	*dip;
	dev_info_t	*pdip;
	int		circ;
	int		circ2;
	int		constraint;
	char		*devnm;

	/*
	 * First, lookup the device
	 */
	dip = e_ddi_hold_devi_by_path(path, 0);
	if (dip == NULL) {
		/*
		 * device does not exist.  This device cannot be
		 * a critical device since it is not in use.  Thus
		 * this device is always retireable.  Return DDI_SUCCESS
		 * to indicate this.  If this device is ever
		 * instantiated, the I/O framework will consult the
		 * persistent retire store, mark it as
		 * retired and fence it off.
		 */
		RIO_DEBUG((CE_NOTE, "Retire device: device doesn't exist."
		    " NOP. Just returning SUCCESS. path=%s", path));
		free_array(cons_array);
		return (DDI_SUCCESS);
	}

	RIO_DEBUG((CE_NOTE, "Retire device: found dip = %p.", (void *)dip));

	pdip = ddi_get_parent(dip);
	ndi_hold_devi(pdip);

	/*
	 * Run devfs_clean() in case dip has no constraints and is
	 * not in use, so is retireable but there are dv_nodes holding
	 * ref-count on the dip.  Note that devfs_clean() always returns
	 * success.
	 */
	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
	(void) ddi_deviname(dip, devnm);
	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
	kmem_free(devnm, MAXNAMELEN + 1);

	ndi_devi_enter(pdip, &circ);

	/* release hold from e_ddi_hold_devi_by_path */
	ndi_rele_devi(dip);

	/*
	 * If it cannot make a determination, is_leaf_node() assumes
	 * dip is a nexus.
	 */
	(void) e_ddi_mark_retiring(dip, cons_array);
	if (!is_leaf_node(dip)) {
		ndi_devi_enter(dip, &circ2);
		ddi_walk_devs(ddi_get_child(dip), e_ddi_mark_retiring,
		    cons_array);
		ndi_devi_exit(dip, circ2);
	}
	free_array(cons_array);

	/*
	 * Now notify the subtree of the proposed retire.
	 */
	RIO_DEBUG((CE_NOTE, "retire: subtree retire notify: path = %s", path));

	constraint = 1;	/* assume constraints allow retire */
	(void) e_ddi_retire_notify(dip, &constraint);
	if (!is_leaf_node(dip)) {
		ndi_devi_enter(dip, &circ2);
		ddi_walk_devs(ddi_get_child(dip), e_ddi_retire_notify,
		    &constraint);
		ndi_devi_exit(dip, circ2);
	}

	/*
	 * Now finalize the retire
	 */
	(void) e_ddi_retire_finalize(dip, &constraint);
	if (!is_leaf_node(dip)) {
		ndi_devi_enter(dip, &circ2);
		ddi_walk_devs(ddi_get_child(dip), e_ddi_retire_finalize,
		    &constraint);
		ndi_devi_exit(dip, circ2);
	}

	if (!constraint) {
		RIO_DEBUG((CE_WARN, "retire failed: path = %s", path));
	} else {
		RIO_DEBUG((CE_NOTE, "retire succeeded: path = %s", path));
	}

	ndi_devi_exit(pdip, circ);
	ndi_rele_devi(pdip);
	return (constraint ? DDI_SUCCESS : DDI_FAILURE);
}
static int
unmark_and_unfence(dev_info_t *dip, void *arg)
{
	char	*path = (char *)arg;

	ASSERT(path);

	(void) ddi_pathname(dip, path);

	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_RETIRED;
	DEVI_SET_DEVICE_ONLINE(dip);
	mutex_exit(&DEVI(dip)->devi_lock);

	RIO_VERBOSE((CE_NOTE, "Cleared RETIRED flag: dip=%p, path=%s",
	    (void *)dip, path));

	(void) spec_unfence_snode(dip);
	RIO_DEBUG((CE_NOTE, "Unfenced device: %s", path));

	if (MDI_PHCI(dip))
		mdi_phci_unretire(dip);

	return (DDI_WALK_CONTINUE);
}

struct find_dip {
	char		*fd_buf;
	char		*fd_path;
	dev_info_t	*fd_dip;
};

static int
find_dip_fcn(dev_info_t *dip, void *arg)
{
	struct find_dip *findp = (struct find_dip *)arg;

	(void) ddi_pathname(dip, findp->fd_buf);

	if (strcmp(findp->fd_path, findp->fd_buf) != 0)
		return (DDI_WALK_CONTINUE);

	ndi_hold_devi(dip);
	findp->fd_dip = dip;

	return (DDI_WALK_TERMINATE);
}
int
e_ddi_unretire_device(char *path)
{
	dev_info_t	*dip;
	dev_info_t	*pdip;
	int		circ;
	int		circ2;
	char		*path2;
	struct find_dip	 find_dip;

	ASSERT(path);
	ASSERT(*path == '/');

	if (strcmp(path, "/") == 0) {
		cmn_err(CE_WARN, "Root node cannot be retired. Skipping "
		    "device unretire: %s", path);
		return (0);
	}

	/*
	 * We can't lookup the dip (corresponding to path) via
	 * e_ddi_hold_devi_by_path() because the dip may be offline
	 * and may not attach.  Use ddi_walk_devs() instead.
	 */
	find_dip.fd_buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	find_dip.fd_path = path;
	find_dip.fd_dip = NULL;

	pdip = ddi_root_node();

	ndi_devi_enter(pdip, &circ);
	ddi_walk_devs(ddi_get_child(pdip), find_dip_fcn, &find_dip);
	ndi_devi_exit(pdip, circ);

	kmem_free(find_dip.fd_buf, MAXPATHLEN);

	if (find_dip.fd_dip == NULL) {
		cmn_err(CE_WARN, "Device not found in device tree. Skipping "
		    "device unretire: %s", path);
		return (0);
	}

	dip = find_dip.fd_dip;

	pdip = ddi_get_parent(dip);

	ndi_hold_devi(pdip);

	ndi_devi_enter(pdip, &circ);

	path2 = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) unmark_and_unfence(dip, path2);
	if (!is_leaf_node(dip)) {
		ndi_devi_enter(dip, &circ2);
		ddi_walk_devs(ddi_get_child(dip), unmark_and_unfence, path2);
		ndi_devi_exit(dip, circ2);
	}

	kmem_free(path2, MAXPATHLEN);

	/* release hold from find_dip_fcn() */
	ndi_rele_devi(dip);

	ndi_devi_exit(pdip, circ);

	ndi_rele_devi(pdip);

	return (0);
}
/*
 * Called before attach on a dip that has been retired.
 */
static int
mark_and_fence(dev_info_t *dip, void *arg)
{
	char	*fencepath = (char *)arg;

	/*
	 * We have already decided to retire this device.  The various
	 * constraint checking flags should not be set.
	 * NOTE that the retire flag may already be set due to
	 * fenced -> detach -> fenced transitions.
	 */
	mutex_enter(&DEVI(dip)->devi_lock);
	ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_CONSTRAINT));
	ASSERT(!(DEVI(dip)->devi_flags & DEVI_R_BLOCKED));
	ASSERT(!(DEVI(dip)->devi_flags & DEVI_RETIRING));
	DEVI(dip)->devi_flags |= DEVI_RETIRED;
	mutex_exit(&DEVI(dip)->devi_lock);
	RIO_VERBOSE((CE_NOTE, "marked as RETIRED dip=%p", (void *)dip));

	if (fencepath) {
		(void) spec_fence_snode(dip, NULL);
		RIO_DEBUG((CE_NOTE, "Fenced: %s",
		    ddi_pathname(dip, fencepath)));
	}

	return (DDI_WALK_CONTINUE);
}
/*
 * Checks the retire database and:
 *
 * - if device is present in the retire database, marks the device retired
 *   and fences it off.
 * - if device is not in retire database, allows the device to attach normally
 *
 * To be called only by framework attach code on first attach attempt.
 */
static int
i_ddi_check_retire(dev_info_t *dip)
{
	int		phci_only;
	char		*path;
	dev_info_t	*pdip;
	int		circ;
	int		constraint;

	pdip = ddi_get_parent(dip);

	/*
	 * Root dip is treated special and doesn't take this code path.
	 * Also root can never be retired.
	 */
	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) ddi_pathname(dip, path);

	RIO_VERBOSE((CE_NOTE, "Checking if dip should attach: dip=%p, path=%s",
	    (void *)dip, path));

	/*
	 * Check if this device is in the "retired" store i.e. should
	 * be retired.  If not, we have nothing to do.
	 */
	if (e_ddi_device_retired(path) == 0) {
		RIO_VERBOSE((CE_NOTE, "device is NOT retired: path=%s", path));
		if (DEVI(dip)->devi_flags & DEVI_RETIRED)
			(void) e_ddi_unretire_device(path);
		kmem_free(path, MAXPATHLEN);
		return (0);
	}

	RIO_DEBUG((CE_NOTE, "attach: device is retired: path=%s", path));

	/*
	 * Mark dips and fence off snodes (if any)
	 */
	RIO_DEBUG((CE_NOTE, "attach: Mark and fence subtree: path=%s", path));
	(void) mark_and_fence(dip, path);
	if (!is_leaf_node(dip)) {
		ndi_devi_enter(dip, &circ);
		ddi_walk_devs(ddi_get_child(dip), mark_and_fence, path);
		ndi_devi_exit(dip, circ);
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * We don't want to check the client.  We just want to
	 * offline the PHCI.
	 */
	phci_only = 1;
	constraint = 1;
	if (MDI_PHCI(dip))
		mdi_phci_retire_finalize(dip, phci_only, &constraint);

	return (1);
}
#define	VAL_ALIAS(array, x)	(strlen(array[x].pair_alias))
#define	VAL_CURR(array, x)	(strlen(array[x].pair_curr))
#define	SWAP(array, x, y)			\
{						\
	alias_pair_t tmpair = array[x];		\
	array[x] = array[y];			\
	array[y] = tmpair;			\
}

static int
partition_curr(alias_pair_t *array, int start, int end)
{
	int	i = start - 1;
	int	j = end + 1;
	int	pivot = start;

	for (;;) {
		do {
			j--;
		} while (VAL_CURR(array, j) > VAL_CURR(array, pivot));

		do {
			i++;
		} while (VAL_CURR(array, i) < VAL_CURR(array, pivot));

		if (i < j)
			SWAP(array, i, j)
		else
			return (j);
	}
}

static int
partition_aliases(alias_pair_t *array, int start, int end)
{
	int	i = start - 1;
	int	j = end + 1;
	int	pivot = start;

	for (;;) {
		do {
			j--;
		} while (VAL_ALIAS(array, j) > VAL_ALIAS(array, pivot));

		do {
			i++;
		} while (VAL_ALIAS(array, i) < VAL_ALIAS(array, pivot));

		if (i < j)
			SWAP(array, i, j)
		else
			return (j);
	}
}

static void
sort_alias_pairs(alias_pair_t *array, int start, int end)
{
	int mid;

	if (start < end) {
		mid = partition_aliases(array, start, end);
		sort_alias_pairs(array, start, mid);
		sort_alias_pairs(array, mid + 1, end);
	}
}

static void
sort_curr_pairs(alias_pair_t *array, int start, int end)
{
	int mid;

	if (start < end) {
		mid = partition_curr(array, start, end);
		sort_curr_pairs(array, start, mid);
		sort_curr_pairs(array, mid + 1, end);
	}
}
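/*
 * Why the pairs are sorted by string length (illustrative note, not part of
 * this file): redirection below walks the pair arrays from the last (longest)
 * entry down to the first, so the most specific prefix wins.  For example,
 * given hypothetical aliases "/pci@0/usb@1" and "/pci@0", a lookup of
 * "/pci@0/usb@1/hub@2" must match the longer "/pci@0/usb@1" entry first;
 * scanning from the end of a length-sorted array yields exactly that order.
 */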
static void
create_sorted_pairs(plat_alias_t *pali, int npali)
{
	int	i;
	int	j;
	int	k;
	int	count;

	count = 0;
	for (i = 0; i < npali; i++) {
		count += pali[i].pali_naliases;
	}

	ddi_aliases.dali_alias_pairs = kmem_zalloc(
	    (sizeof (alias_pair_t)) * count, KM_NOSLEEP);
	if (ddi_aliases.dali_alias_pairs == NULL) {
		cmn_err(CE_PANIC, "alias path-pair alloc failed");
		/*NOTREACHED*/
	}

	ddi_aliases.dali_curr_pairs = kmem_zalloc(
	    (sizeof (alias_pair_t)) * count, KM_NOSLEEP);
	if (ddi_aliases.dali_curr_pairs == NULL) {
		cmn_err(CE_PANIC, "curr path-pair alloc failed");
		/*NOTREACHED*/
	}

	for (i = 0, k = 0; i < npali; i++) {
		for (j = 0; j < pali[i].pali_naliases; j++, k++) {
			ddi_aliases.dali_alias_pairs[k].pair_curr =
			    ddi_aliases.dali_curr_pairs[k].pair_curr =
			    pali[i].pali_current;
			ddi_aliases.dali_alias_pairs[k].pair_alias =
			    ddi_aliases.dali_curr_pairs[k].pair_alias =
			    pali[i].pali_aliases[j];
		}
	}

	ddi_aliases.dali_num_pairs = count;

	/* Now sort the array based on length of pair_alias */
	sort_alias_pairs(ddi_aliases.dali_alias_pairs, 0, count - 1);
	sort_curr_pairs(ddi_aliases.dali_curr_pairs, 0, count - 1);
}
void
ddi_register_aliases(plat_alias_t *pali, uint64_t npali)
{
	ASSERT((pali == NULL) ^ (npali != 0));

	if (npali == 0) {
		ddi_err(DER_PANIC, NULL, "npali == 0");
		/*NOTREACHED*/
	}

	if (ddi_aliases_present == B_TRUE) {
		ddi_err(DER_PANIC, NULL, "multiple init");
		/*NOTREACHED*/
	}

	ddi_aliases.dali_alias_TLB = mod_hash_create_strhash(
	    "ddi-alias-tlb", DDI_ALIAS_HASH_SIZE, mod_hash_null_valdtor);
	if (ddi_aliases.dali_alias_TLB == NULL) {
		ddi_err(DER_PANIC, NULL, "alias TLB hash alloc failed");
		/*NOTREACHED*/
	}

	ddi_aliases.dali_curr_TLB = mod_hash_create_strhash(
	    "ddi-curr-tlb", DDI_ALIAS_HASH_SIZE, mod_hash_null_valdtor);
	if (ddi_aliases.dali_curr_TLB == NULL) {
		ddi_err(DER_PANIC, NULL, "curr TLB hash alloc failed");
		/*NOTREACHED*/
	}

	create_sorted_pairs(pali, npali);

	tsd_create(&tsd_ddi_redirect, NULL);

	ddi_aliases_present = B_TRUE;
}

static dev_info_t *
path_to_dip(char *path)
{
	dev_info_t	*currdip;
	int		error;
	char		*pdup;

	pdup = ddi_strdup(path, KM_NOSLEEP);
	if (pdup == NULL) {
		cmn_err(CE_PANIC, "path strdup failed: %s", path);
		/*NOTREACHED*/
	}

	error = resolve_pathname(pdup, &currdip, NULL, NULL);

	kmem_free(pdup, strlen(path) + 1);

	return (error ? NULL : currdip);
}

dev_info_t *
ddi_alias_to_currdip(char *alias, int i)
{
	alias_pair_t	*pair;
	char		*curr;
	dev_info_t	*currdip = NULL;
	char		*aliasdup;
	int		rv, len;

	pair = &(ddi_aliases.dali_alias_pairs[i]);
	len = strlen(pair->pair_alias);

	curr = NULL;
	aliasdup = ddi_strdup(alias, KM_NOSLEEP);
	if (aliasdup == NULL) {
		cmn_err(CE_PANIC, "aliasdup alloc failed");
		/*NOTREACHED*/
	}

	if (strncmp(alias, pair->pair_alias, len) != 0)
		goto out;

	if (alias[len] != '/' && alias[len] != '\0')
		goto out;

	curr = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (curr == NULL) {
		cmn_err(CE_PANIC, "curr alloc failed");
		/*NOTREACHED*/
	}
	(void) strlcpy(curr, pair->pair_curr, MAXPATHLEN);
	if (alias[len] == '/') {
		(void) strlcat(curr, "/", MAXPATHLEN);
		(void) strlcat(curr, &alias[len + 1], MAXPATHLEN);
	}

	currdip = path_to_dip(curr);

out:
	if (currdip) {
		rv = mod_hash_insert(ddi_aliases.dali_alias_TLB,
		    (mod_hash_key_t)aliasdup, (mod_hash_val_t)curr);
		if (rv != 0) {
			kmem_free(curr, MAXPATHLEN);
			strfree(aliasdup);
		}
	} else {
		rv = mod_hash_insert(ddi_aliases.dali_alias_TLB,
		    (mod_hash_key_t)aliasdup, (mod_hash_val_t)NULL);
		if (rv != 0)
			strfree(aliasdup);
		if (curr)
			kmem_free(curr, MAXPATHLEN);
	}

	return (currdip);
}

char *
ddi_curr_to_alias(char *curr, int i)
{
	alias_pair_t	*pair;
	char		*alias;
	char		*currdup;
	int		len;
	int		rv;

	pair = &(ddi_aliases.dali_curr_pairs[i]);

	len = strlen(pair->pair_curr);

	alias = NULL;
	currdup = ddi_strdup(curr, KM_NOSLEEP);
	if (currdup == NULL) {
		cmn_err(CE_PANIC, "currdup alloc failed");
		/*NOTREACHED*/
	}

	if (strncmp(curr, pair->pair_curr, len) != 0)
		goto out;

	if (curr[len] != '/' && curr[len] != '\0')
		goto out;

	alias = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (alias == NULL) {
		cmn_err(CE_PANIC, "alias alloc failed");
		/*NOTREACHED*/
	}

	(void) strlcpy(alias, pair->pair_alias, MAXPATHLEN);
	if (curr[len] == '/') {
		(void) strlcat(alias, "/", MAXPATHLEN);
		(void) strlcat(alias, &curr[len + 1], MAXPATHLEN);
	}

	if (e_ddi_path_to_instance(alias) == NULL) {
		kmem_free(alias, MAXPATHLEN);
		alias = NULL;
	}

out:
	rv = mod_hash_insert(ddi_aliases.dali_curr_TLB,
	    (mod_hash_key_t)currdup, (mod_hash_val_t)alias);
	if (rv != 0)
		strfree(currdup);

	return (alias);
}
dev_info_t *
ddi_alias_redirect(char *alias)
{
	char		*curr;
	dev_info_t	*currdip;
	int		i;

	if (ddi_aliases_present == B_FALSE)
		return (NULL);

	if (tsd_get(tsd_ddi_redirect))
		return (NULL);

	(void) tsd_set(tsd_ddi_redirect, (void *)1);

	ASSERT(ddi_aliases.dali_alias_TLB);
	ASSERT(ddi_aliases.dali_alias_pairs);

	curr = NULL;
	if (mod_hash_find(ddi_aliases.dali_alias_TLB,
	    (mod_hash_key_t)alias, (mod_hash_val_t *)&curr) == 0) {
		currdip = curr ? path_to_dip(curr) : NULL;
		goto out;
	}

	/* The TLB has no translation, do it the hard way */
	currdip = NULL;
	for (i = ddi_aliases.dali_num_pairs - 1; i >= 0; i--) {
		currdip = ddi_alias_to_currdip(alias, i);
		if (currdip)
			break;
	}
out:
	(void) tsd_set(tsd_ddi_redirect, NULL);

	return (currdip);
}

char *
ddi_curr_redirect(char *curr)
{
	char	*alias;
	int	i;

	if (ddi_aliases_present == B_FALSE)
		return (NULL);

	if (tsd_get(tsd_ddi_redirect))
		return (NULL);

	(void) tsd_set(tsd_ddi_redirect, (void *)1);

	ASSERT(ddi_aliases.dali_curr_TLB);
	ASSERT(ddi_aliases.dali_curr_pairs);

	alias = NULL;
	if (mod_hash_find(ddi_aliases.dali_curr_TLB,
	    (mod_hash_key_t)curr, (mod_hash_val_t *)&alias) == 0) {
		goto out;
	}

	/* The TLB has no translation, do it the slow way */
	alias = NULL;
	for (i = ddi_aliases.dali_num_pairs - 1; i >= 0; i--) {
		alias = ddi_curr_to_alias(curr, i);
		if (alias)
			break;
	}
out:
	(void) tsd_set(tsd_ddi_redirect, NULL);

	return (alias);
}
void
ddi_err(ddi_err_t ade, dev_info_t *rdip, const char *fmt, ...)
{
	char		strbuf[256];
	char		*buf;
	size_t		buflen, tlen;
	int		ce;
	int		de;
	va_list		ap;
	const char	*fmtbad = "Invalid arguments to ddi_err()";

	de = DER_CONT;
	strbuf[1] = '\0';

	switch (ade) {
	case DER_CONS:
		strbuf[0] = '^';
		break;
	case DER_LOG:
		strbuf[0] = '!';
		break;
	case DER_VERB:
		strbuf[0] = '?';
		break;
	default:
		strbuf[0] = '\0';
		de = ade;
		break;
	}

	tlen = strlen(strbuf);
	buf = strbuf + tlen;
	buflen = sizeof (strbuf) - tlen;

	if (rdip && ddi_get_instance(rdip) == -1) {
		(void) snprintf(buf, buflen, "%s: ",
		    ddi_driver_name(rdip));
	} else if (rdip) {
		(void) snprintf(buf, buflen, "%s%d: ",
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
	}

	tlen = strlen(strbuf);
	buf = strbuf + tlen;
	buflen = sizeof (strbuf) - tlen;

	va_start(ap, fmt);
	switch (de) {
	case DER_CONT:
		(void) vsnprintf(buf, buflen, fmt, ap);
		if (ade != DER_CONT) {
			(void) strlcat(strbuf, "\n", sizeof (strbuf));
		}
		ce = CE_CONT;
		break;
	case DER_NOTE:
		(void) vsnprintf(buf, buflen, fmt, ap);
		ce = CE_NOTE;
		break;
	case DER_WARN:
		(void) vsnprintf(buf, buflen, fmt, ap);
		ce = CE_WARN;
		break;
	case DER_MODE:
		(void) vsnprintf(buf, buflen, fmt, ap);
		if (ddi_err_panic == B_TRUE) {
			ce = CE_PANIC;
		} else {
			ce = CE_WARN;
		}
		break;
	case DER_DEBUG:
		(void) snprintf(buf, buflen, "DEBUG: ");
		tlen = strlen("DEBUG: ");
		(void) vsnprintf(buf + tlen, buflen - tlen, fmt, ap);
		ce = CE_CONT;
		break;
	case DER_PANIC:
		(void) vsnprintf(buf, buflen, fmt, ap);
		ce = CE_PANIC;
		break;
	case DER_INVALID:
	default:
		(void) snprintf(buf, buflen, fmtbad);
		tlen = strlen(fmtbad);
		(void) vsnprintf(buf + tlen, buflen - tlen, fmt, ap);
		ce = CE_PANIC;
		break;
	}
	va_end(ap);

	cmn_err(ce, strbuf);
}
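/*
 * Illustrative sketch (not part of this file): typical ddi_err() calls.
 * The format strings, status variable, and dip are hypothetical.
 *
 *	ddi_err(DER_WARN, dip, "unexpected interrupt (status=%x)", status);
 *	ddi_err(DER_LOG, NULL, "alias translation disabled");
 */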
void
ddi_mem_update(uint64_t addr, uint64_t size)
{
#if defined(__x86) && !defined(__xpv)
	extern void immu_physmem_update(uint64_t addr, uint64_t size);
	immu_physmem_update(addr, size);