 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
30 #include <sys/types.h>
31 #include <sys/sysmacros.h>
33 #include <sys/errno.h>
34 #include <sys/modctl.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ddi_impldefs.h>
42 #include <sys/fm/protocol.h>
43 #include <sys/fm/util.h>
44 #include <sys/fm/io/ddi.h>
45 #include <sys/sysevent/eventdefs.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/debug.h>
50 #include <sys/bofi_impl.h>
 * Testing the resilience of a hardened device driver requires a suitably wide
 * range of different types of "typical" hardware faults to be injected,
 * preferably in a controlled and repeatable fashion. This is not in general
 * possible via hardware, so the "fault injection test harness" is provided.
 * This works by intercepting calls from the driver to various DDI routines,
 * and then corrupting the result of those DDI routine calls as if the
 * hardware had caused the corruption.
 *
 * Conceptually, the bofi driver consists of two parts:
 *
 * A driver interface that supports a number of ioctls which allow error
 * definitions ("errdefs") to be defined and subsequently managed. The
 * driver is a clone driver, so each open will create a separate
 * invocation. Any errdefs created by using ioctls to that invocation
 * will automatically be deleted when that invocation is closed.
 *
 * Intercept routines: When the bofi driver is attached, it edits the
 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
 * field in the "bofi.conf" file, thus allowing the
 * bofi driver to intercept various ddi functions. These intercept
 * routines primarily carry out fault injections based on the errdefs
 * created for that device.
 *
 * Faults can be injected into:
 *
 * DMA (corrupting data for DMA to/from memory areas defined by
 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
 *
 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
 * etc)
 *
 * Interrupts (generating spurious interrupts, losing interrupts,
 * delaying interrupts).
 *
 * By default, ddi routines called from all drivers will be intercepted
 * and faults potentially injected. However, the "bofi-to-test" field in
 * the "bofi.conf" file can be set to a space-separated list of drivers to
 * test (or by preceding each driver name in the list with an "!", a list
 * of drivers not to test).
 *
 * In addition to fault injection, the bofi driver does a number of static
 * checks which are controlled by properties in the "bofi.conf" file.
 *
 * "bofi-ddi-check" - if set will validate that there are no PIO access
 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
 *
 * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
 * specifying addresses outside the range of the access_handle.
 *
 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
 * are being made correctly.
107 extern void *bp_mapin_common(struct buf
*, int);
109 static int bofi_ddi_check
;
110 static int bofi_sync_check
;
111 static int bofi_range_check
;
113 static struct bofi_link bofi_link_array
[BOFI_NLINKS
], *bofi_link_freelist
;
115 #define LLSZMASK (sizeof (uint64_t)-1)
117 #define HDL_HASH_TBL_SIZE 64
118 static struct bofi_shadow hhash_table
[HDL_HASH_TBL_SIZE
];
119 static struct bofi_shadow dhash_table
[HDL_HASH_TBL_SIZE
];
120 #define HDL_DHASH(x) \
121 (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
122 #define HDL_HHASH(x) \
123 (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
125 static struct bofi_shadow shadow_list
;
126 static struct bofi_errent
*errent_listp
;
128 static char driver_list
[NAMESIZE
];
129 static int driver_list_size
;
130 static int driver_list_neg
;
131 static char nexus_name
[NAMESIZE
];
133 static int initialized
= 0;
136 static int clone_tab
[NCLONES
];
138 static dev_info_t
*our_dip
;
140 static kmutex_t bofi_mutex
;
141 static kmutex_t clone_tab_mutex
;
142 static kmutex_t bofi_low_mutex
;
143 static ddi_iblock_cookie_t bofi_low_cookie
;
144 static uint_t
bofi_signal(caddr_t arg
);
145 static int bofi_getinfo(dev_info_t
*, ddi_info_cmd_t
, void *, void **);
146 static int bofi_attach(dev_info_t
*, ddi_attach_cmd_t
);
147 static int bofi_detach(dev_info_t
*, ddi_detach_cmd_t
);
148 static int bofi_open(dev_t
*, int, int, cred_t
*);
149 static int bofi_close(dev_t
, int, int, cred_t
*);
150 static int bofi_ioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
151 static int bofi_errdef_alloc(struct bofi_errdef
*, char *,
152 struct bofi_errent
*);
153 static int bofi_errdef_free(struct bofi_errent
*);
154 static void bofi_start(struct bofi_errctl
*, char *);
155 static void bofi_stop(struct bofi_errctl
*, char *);
156 static void bofi_broadcast(struct bofi_errctl
*, char *);
157 static void bofi_clear_acc_chk(struct bofi_errctl
*, char *);
158 static void bofi_clear_errors(struct bofi_errctl
*, char *);
159 static void bofi_clear_errdefs(struct bofi_errctl
*, char *);
160 static int bofi_errdef_check(struct bofi_errstate
*,
161 struct acc_log_elem
**);
162 static int bofi_errdef_check_w(struct bofi_errstate
*,
163 struct acc_log_elem
**);
164 static int bofi_map(dev_info_t
*, dev_info_t
*, ddi_map_req_t
*,
165 off_t
, off_t
, caddr_t
*);
166 static int bofi_dma_allochdl(dev_info_t
*, dev_info_t
*,
167 ddi_dma_attr_t
*, int (*)(caddr_t
), caddr_t
,
169 static int bofi_dma_freehdl(dev_info_t
*, dev_info_t
*,
171 static int bofi_dma_bindhdl(dev_info_t
*, dev_info_t
*,
172 ddi_dma_handle_t
, struct ddi_dma_req
*, ddi_dma_cookie_t
*,
174 static int bofi_dma_unbindhdl(dev_info_t
*, dev_info_t
*,
176 static int bofi_dma_flush(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
177 off_t
, size_t, uint_t
);
178 static int bofi_dma_ctl(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
179 enum ddi_dma_ctlops
, off_t
*, size_t *, caddr_t
*, uint_t
);
180 static int bofi_dma_win(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
181 uint_t
, off_t
*, size_t *, ddi_dma_cookie_t
*, uint_t
*);
182 static int bofi_intr_ops(dev_info_t
*dip
, dev_info_t
*rdip
,
183 ddi_intr_op_t intr_op
, ddi_intr_handle_impl_t
*hdlp
,
185 static int bofi_fm_ereport_callback(sysevent_t
*ev
, void *cookie
);
187 evchan_t
*bofi_error_chan
;
189 #define FM_SIMULATED_DMA "simulated.dma"
190 #define FM_SIMULATED_PIO "simulated.pio"
192 static int driver_under_test(dev_info_t
*);
193 static int bofi_check_acc_hdl(ddi_acc_impl_t
*);
194 static int bofi_check_dma_hdl(ddi_dma_impl_t
*);
195 static int bofi_post_event(dev_info_t
*dip
, dev_info_t
*rdip
,
196 ddi_eventcookie_t eventhdl
, void *impl_data
);
198 static struct bus_ops bofi_bus_ops
= {
215 ndi_busop_get_eventcookie
,
216 ndi_busop_add_eventcall
,
217 ndi_busop_remove_eventcall
,
230 static struct cb_ops bofi_cb_ops
= {
231 bofi_open
, /* open */
232 bofi_close
, /* close */
233 nodev
, /* strategy */
238 bofi_ioctl
, /* ioctl */
242 nochpoll
, /* chpoll */
243 ddi_prop_op
, /* prop_op */
244 NULL
, /* for STREAMS drivers */
245 D_MP
, /* driver compatibility flag */
246 CB_REV
, /* cb_ops revision */
251 static struct dev_ops bofi_ops
= {
252 DEVO_REV
, /* driver build version */
253 0, /* device reference count */
263 ddi_quiesce_not_needed
, /* quiesce */
266 /* module configuration stuff */
269 static struct modldrv modldrv
= {
275 static struct modlinkage modlinkage
= {
281 static struct bus_ops save_bus_ops
;
284 * support routine - map user page into kernel virtual
287 dmareq_mapin(offset_t len
, caddr_t addr
, struct as
*as
, int flag
)
293 * mock up a buf structure so we can call bp_mapin_common()
295 buf
.b_flags
= B_PHYS
;
296 buf
.b_un
.b_addr
= (caddr_t
)addr
;
297 buf
.b_bcount
= (size_t)len
;
300 return (bp_mapin_common(&buf
, flag
));
305 * support routine - map page chain into kernel virtual
308 dmareq_pp_mapin(offset_t len
, uint_t offset
, page_t
*pp
, int flag
)
313 * mock up a buf structure so we can call bp_mapin_common()
315 buf
.b_flags
= B_PAGEIO
;
316 buf
.b_un
.b_addr
= (caddr_t
)(uintptr_t)offset
;
317 buf
.b_bcount
= (size_t)len
;
319 return (bp_mapin_common(&buf
, flag
));
324 * support routine - map page array into kernel virtual
327 dmareq_pplist_mapin(uint_t len
, caddr_t addr
, page_t
**pplist
, struct as
*as
,
334 * mock up a buf structure so we can call bp_mapin_common()
336 buf
.b_flags
= B_PHYS
|B_SHADOW
;
337 buf
.b_un
.b_addr
= addr
;
339 buf
.b_shadow
= pplist
;
342 return (bp_mapin_common(&buf
, flag
));
 * support routine - map dmareq into kernel virtual if not already
 * fills in *lenp with length
 * *mapaddr will be new kernel virtual address - or null if no mapping needed
352 ddi_dmareq_mapin(struct ddi_dma_req
*dmareqp
, caddr_t
*mapaddrp
,
355 int sleep
= (dmareqp
->dmar_fp
== DDI_DMA_SLEEP
) ? VM_SLEEP
: VM_NOSLEEP
;
357 *lenp
= dmareqp
->dmar_object
.dmao_size
;
358 if (dmareqp
->dmar_object
.dmao_type
== DMA_OTYP_PAGES
) {
359 *mapaddrp
= dmareq_pp_mapin(dmareqp
->dmar_object
.dmao_size
,
360 dmareqp
->dmar_object
.dmao_obj
.pp_obj
.pp_offset
,
361 dmareqp
->dmar_object
.dmao_obj
.pp_obj
.pp_pp
, sleep
);
363 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_priv
!= NULL
) {
364 *mapaddrp
= dmareq_pplist_mapin(dmareqp
->dmar_object
.dmao_size
,
365 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
,
366 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_priv
,
367 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
, sleep
);
369 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
== &kas
) {
371 return (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
);
372 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
== NULL
) {
374 return (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
);
376 *mapaddrp
= dmareq_mapin(dmareqp
->dmar_object
.dmao_size
,
377 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
,
378 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
, sleep
);
385 * support routine - free off kernel virtual mapping as allocated by
389 ddi_dmareq_mapout(caddr_t addr
, offset_t len
, int map_flags
, page_t
*pp
,
397 * mock up a buf structure
399 buf
.b_flags
= B_REMAPPED
| map_flags
;
400 buf
.b_un
.b_addr
= addr
;
401 buf
.b_bcount
= (size_t)len
;
403 buf
.b_shadow
= pplist
;
 * reset the bus_ops structure of the specified nexus to point to
 * the original values in the save_bus_ops structure.
 *
 * Note that both this routine and modify_bus_ops() rely on the current
 * behavior of the framework in that nexus drivers are not unloadable
426 reset_bus_ops(char *name
, struct bus_ops
*bop
)
433 mutex_enter(&mod_lock
);
435 * find specified module
439 if (strcmp(name
, modp
->mod_modname
) == 0) {
440 if (!modp
->mod_linkage
) {
441 mutex_exit(&mod_lock
);
444 mp
= modp
->mod_linkage
->ml_linkage
[0];
445 if (!mp
|| !mp
->drv_dev_ops
) {
446 mutex_exit(&mod_lock
);
449 ops
= mp
->drv_dev_ops
;
450 bp
= ops
->devo_bus_ops
;
452 mutex_exit(&mod_lock
);
455 if (ops
->devo_refcnt
> 0) {
457 * As long as devices are active with modified
458 * bus ops bofi must not go away. There may be
459 * drivers with modified access or dma handles.
461 mutex_exit(&mod_lock
);
464 cmn_err(CE_NOTE
, "bofi reset bus_ops for %s",
466 bp
->bus_intr_op
= bop
->bus_intr_op
;
467 bp
->bus_post_event
= bop
->bus_post_event
;
468 bp
->bus_map
= bop
->bus_map
;
469 bp
->bus_dma_map
= bop
->bus_dma_map
;
470 bp
->bus_dma_allochdl
= bop
->bus_dma_allochdl
;
471 bp
->bus_dma_freehdl
= bop
->bus_dma_freehdl
;
472 bp
->bus_dma_bindhdl
= bop
->bus_dma_bindhdl
;
473 bp
->bus_dma_unbindhdl
= bop
->bus_dma_unbindhdl
;
474 bp
->bus_dma_flush
= bop
->bus_dma_flush
;
475 bp
->bus_dma_win
= bop
->bus_dma_win
;
476 bp
->bus_dma_ctl
= bop
->bus_dma_ctl
;
477 mutex_exit(&mod_lock
);
480 } while ((modp
= modp
->mod_next
) != &modules
);
481 mutex_exit(&mod_lock
);
 * modify the bus_ops structure of the specified nexus to point to bofi
 * routines, saving the original values in the save_bus_ops structure
491 modify_bus_ops(char *name
, struct bus_ops
*bop
)
498 if (ddi_name_to_major(name
) == -1)
501 mutex_enter(&mod_lock
);
503 * find specified module
507 if (strcmp(name
, modp
->mod_modname
) == 0) {
508 if (!modp
->mod_linkage
) {
509 mutex_exit(&mod_lock
);
512 mp
= modp
->mod_linkage
->ml_linkage
[0];
513 if (!mp
|| !mp
->drv_dev_ops
) {
514 mutex_exit(&mod_lock
);
517 ops
= mp
->drv_dev_ops
;
518 bp
= ops
->devo_bus_ops
;
520 mutex_exit(&mod_lock
);
523 if (ops
->devo_refcnt
== 0) {
525 * If there is no device active for this
526 * module then there is nothing to do for bofi.
528 mutex_exit(&mod_lock
);
531 cmn_err(CE_NOTE
, "bofi modify bus_ops for %s",
534 bp
->bus_intr_op
= bop
->bus_intr_op
;
535 bp
->bus_post_event
= bop
->bus_post_event
;
536 bp
->bus_map
= bop
->bus_map
;
537 bp
->bus_dma_map
= bop
->bus_dma_map
;
538 bp
->bus_dma_allochdl
= bop
->bus_dma_allochdl
;
539 bp
->bus_dma_freehdl
= bop
->bus_dma_freehdl
;
540 bp
->bus_dma_bindhdl
= bop
->bus_dma_bindhdl
;
541 bp
->bus_dma_unbindhdl
= bop
->bus_dma_unbindhdl
;
542 bp
->bus_dma_flush
= bop
->bus_dma_flush
;
543 bp
->bus_dma_win
= bop
->bus_dma_win
;
544 bp
->bus_dma_ctl
= bop
->bus_dma_ctl
;
545 mutex_exit(&mod_lock
);
548 } while ((modp
= modp
->mod_next
) != &modules
);
549 mutex_exit(&mod_lock
);
559 e
= ddi_soft_state_init(&statep
, sizeof (struct bofi_errent
), 1);
562 if ((e
= mod_install(&modlinkage
)) != 0)
563 ddi_soft_state_fini(&statep
);
573 if ((e
= mod_remove(&modlinkage
)) != 0)
575 ddi_soft_state_fini(&statep
);
581 _info(struct modinfo
*modinfop
)
583 return (mod_info(&modlinkage
, modinfop
));
588 bofi_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
598 if (cmd
!= DDI_ATTACH
)
599 return (DDI_FAILURE
);
601 * only one instance - but we clone using the open routine
603 if (ddi_get_instance(dip
) > 0)
604 return (DDI_FAILURE
);
607 if ((name
= ddi_get_name(dip
)) == NULL
)
608 return (DDI_FAILURE
);
609 (void) snprintf(buf
, sizeof (buf
), "%s,ctl", name
);
610 if (ddi_create_minor_node(dip
, buf
, S_IFCHR
, 0,
611 DDI_PSEUDO
, 0) == DDI_FAILURE
)
612 return (DDI_FAILURE
);
614 if (ddi_get_soft_iblock_cookie(dip
, DDI_SOFTINT_MED
,
615 &bofi_low_cookie
) != DDI_SUCCESS
) {
616 ddi_remove_minor_node(dip
, buf
);
617 return (DDI_FAILURE
); /* fail attach */
620 * get nexus name (from conf file)
622 if (ddi_prop_op(DDI_DEV_T_ANY
, dip
, PROP_LEN_AND_VAL_BUF
, 0,
623 "bofi-nexus", nexus_name
, &size
) != DDI_PROP_SUCCESS
) {
624 ddi_remove_minor_node(dip
, buf
);
625 return (DDI_FAILURE
);
628 * get whether to do dma map kmem private checking
630 if ((bofi_range_check
= ddi_prop_lookup_string(DDI_DEV_T_ANY
,
631 dip
, 0, "bofi-range-check", &ptr
)) != DDI_PROP_SUCCESS
)
632 bofi_range_check
= 0;
633 else if (strcmp(ptr
, "panic") == 0)
634 bofi_range_check
= 2;
635 else if (strcmp(ptr
, "warn") == 0)
636 bofi_range_check
= 1;
638 bofi_range_check
= 0;
642 * get whether to prevent direct access to register
644 if ((bofi_ddi_check
= ddi_prop_lookup_string(DDI_DEV_T_ANY
,
645 dip
, 0, "bofi-ddi-check", &ptr
)) != DDI_PROP_SUCCESS
)
647 else if (strcmp(ptr
, "on") == 0)
654 * get whether to do copy on ddi_dma_sync
656 if ((bofi_sync_check
= ddi_prop_lookup_string(DDI_DEV_T_ANY
,
657 dip
, 0, "bofi-sync-check", &ptr
)) != DDI_PROP_SUCCESS
)
659 else if (strcmp(ptr
, "on") == 0)
666 * get driver-under-test names (from conf file)
669 if (ddi_prop_op(DDI_DEV_T_ANY
, dip
, PROP_LEN_AND_VAL_BUF
, 0,
670 "bofi-to-test", driver_list
, &size
) != DDI_PROP_SUCCESS
)
673 * and convert into a sequence of strings
677 driver_list_size
= strlen(driver_list
);
678 for (i
= 0; i
< driver_list_size
; i
++) {
679 if (driver_list
[i
] == ' ') {
680 driver_list
[i
] = '\0';
682 } else if (new_string
) {
683 if (driver_list
[i
] != '!')
689 * initialize mutex, lists
691 mutex_init(&clone_tab_mutex
, NULL
, MUTEX_DRIVER
,
694 * fake up iblock cookie - need to protect outselves
695 * against drivers that use hilevel interrupts
700 mutex_init(&bofi_mutex
, NULL
, MUTEX_SPIN
, (void *)(uintptr_t)s
);
701 mutex_init(&bofi_low_mutex
, NULL
, MUTEX_DRIVER
,
702 (void *)bofi_low_cookie
);
703 shadow_list
.next
= &shadow_list
;
704 shadow_list
.prev
= &shadow_list
;
705 for (i
= 0; i
< HDL_HASH_TBL_SIZE
; i
++) {
706 hhash_table
[i
].hnext
= &hhash_table
[i
];
707 hhash_table
[i
].hprev
= &hhash_table
[i
];
708 dhash_table
[i
].dnext
= &dhash_table
[i
];
709 dhash_table
[i
].dprev
= &dhash_table
[i
];
711 for (i
= 1; i
< BOFI_NLINKS
; i
++)
712 bofi_link_array
[i
].link
= &bofi_link_array
[i
-1];
713 bofi_link_freelist
= &bofi_link_array
[BOFI_NLINKS
- 1];
715 * overlay bus_ops structure
717 if (modify_bus_ops(nexus_name
, &bofi_bus_ops
) == 0) {
718 ddi_remove_minor_node(dip
, buf
);
719 mutex_destroy(&clone_tab_mutex
);
720 mutex_destroy(&bofi_mutex
);
721 mutex_destroy(&bofi_low_mutex
);
722 return (DDI_FAILURE
);
724 if (sysevent_evc_bind(FM_ERROR_CHAN
, &bofi_error_chan
, 0) == 0)
725 (void) sysevent_evc_subscribe(bofi_error_chan
, "bofi",
726 EC_FM
, bofi_fm_ereport_callback
, NULL
, 0);
729 * save dip for getinfo
735 return (DDI_SUCCESS
);
740 bofi_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
745 if (cmd
!= DDI_DETACH
)
746 return (DDI_FAILURE
);
747 if (ddi_get_instance(dip
) > 0)
748 return (DDI_FAILURE
);
749 if ((name
= ddi_get_name(dip
)) == NULL
)
750 return (DDI_FAILURE
);
751 (void) snprintf(buf
, sizeof (buf
), "%s,ctl", name
);
752 mutex_enter(&bofi_low_mutex
);
753 mutex_enter(&bofi_mutex
);
755 * make sure test bofi is no longer in use
757 if (shadow_list
.next
!= &shadow_list
|| errent_listp
!= NULL
) {
758 mutex_exit(&bofi_mutex
);
759 mutex_exit(&bofi_low_mutex
);
760 return (DDI_FAILURE
);
762 mutex_exit(&bofi_mutex
);
763 mutex_exit(&bofi_low_mutex
);
766 * restore bus_ops structure
768 if (reset_bus_ops(nexus_name
, &save_bus_ops
) == 0)
769 return (DDI_FAILURE
);
771 (void) sysevent_evc_unbind(bofi_error_chan
);
773 mutex_destroy(&clone_tab_mutex
);
774 mutex_destroy(&bofi_mutex
);
775 mutex_destroy(&bofi_low_mutex
);
776 ddi_remove_minor_node(dip
, buf
);
779 return (DDI_SUCCESS
);
785 bofi_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **result
)
787 dev_t dev
= (dev_t
)arg
;
788 int minor
= (int)getminor(dev
);
792 case DDI_INFO_DEVT2DEVINFO
:
793 if (minor
!= 0 || our_dip
== NULL
) {
795 retval
= DDI_FAILURE
;
797 *result
= (void *)our_dip
;
798 retval
= DDI_SUCCESS
;
801 case DDI_INFO_DEVT2INSTANCE
:
803 retval
= DDI_SUCCESS
;
806 retval
= DDI_FAILURE
;
814 bofi_open(dev_t
*devp
, int flag
, int otyp
, cred_t
*credp
)
816 int minor
= (int)getminor(*devp
);
817 struct bofi_errent
*softc
;
820 * only allow open on minor=0 - the clone device
825 * fail if not attached
830 * find a free slot and grab it
832 mutex_enter(&clone_tab_mutex
);
833 for (minor
= 1; minor
< NCLONES
; minor
++) {
834 if (clone_tab
[minor
] == 0) {
835 clone_tab
[minor
] = 1;
839 mutex_exit(&clone_tab_mutex
);
840 if (minor
== NCLONES
)
843 * soft state structure for this clone is used to maintain a list
844 * of allocated errdefs so they can be freed on close
846 if (ddi_soft_state_zalloc(statep
, minor
) != DDI_SUCCESS
) {
847 mutex_enter(&clone_tab_mutex
);
848 clone_tab
[minor
] = 0;
849 mutex_exit(&clone_tab_mutex
);
852 softc
= ddi_get_soft_state(statep
, minor
);
853 softc
->cnext
= softc
;
854 softc
->cprev
= softc
;
856 *devp
= makedevice(getmajor(*devp
), minor
);
863 bofi_close(dev_t dev
, int flag
, int otyp
, cred_t
*credp
)
865 int minor
= (int)getminor(dev
);
866 struct bofi_errent
*softc
;
867 struct bofi_errent
*ep
, *next_ep
;
869 softc
= ddi_get_soft_state(statep
, minor
);
873 * find list of errdefs and free them off
875 for (ep
= softc
->cnext
; ep
!= softc
; ) {
877 (void) bofi_errdef_free(ep
);
881 * free clone tab slot
883 mutex_enter(&clone_tab_mutex
);
884 clone_tab
[minor
] = 0;
885 mutex_exit(&clone_tab_mutex
);
887 ddi_soft_state_free(statep
, minor
);
894 bofi_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
,
897 struct bofi_errent
*softc
;
898 int minor
= (int)getminor(dev
);
899 struct bofi_errdef errdef
;
900 struct bofi_errctl errctl
;
901 struct bofi_errstate errstate
;
903 struct bofi_get_handles get_handles
;
904 struct bofi_get_hdl_info hdl_info
;
905 struct handle_info
*hdlip
;
906 struct handle_info
*hib
;
911 int req_count
, count
, err
;
913 struct bofi_shadow
*hp
;
915 struct bofi_shadow
*hhashp
;
921 * add a new error definition
923 #ifdef _MULTI_DATAMODEL
924 switch (ddi_model_convert_from(mode
& FMODELS
)) {
925 case DDI_MODEL_ILP32
:
928 * For use when a 32 bit app makes a call into a
931 struct bofi_errdef32 errdef_32
;
933 if (ddi_copyin((void *)arg
, &errdef_32
,
934 sizeof (struct bofi_errdef32
), mode
)) {
937 errdef
.namesize
= errdef_32
.namesize
;
938 (void) strncpy(errdef
.name
, errdef_32
.name
, NAMESIZE
);
939 errdef
.instance
= errdef_32
.instance
;
940 errdef
.rnumber
= errdef_32
.rnumber
;
941 errdef
.offset
= errdef_32
.offset
;
942 errdef
.len
= errdef_32
.len
;
943 errdef
.access_type
= errdef_32
.access_type
;
944 errdef
.access_count
= errdef_32
.access_count
;
945 errdef
.fail_count
= errdef_32
.fail_count
;
946 errdef
.acc_chk
= errdef_32
.acc_chk
;
947 errdef
.optype
= errdef_32
.optype
;
948 errdef
.operand
= errdef_32
.operand
;
949 errdef
.log
.logsize
= errdef_32
.log
.logsize
;
950 errdef
.log
.entries
= errdef_32
.log
.entries
;
951 errdef
.log
.flags
= errdef_32
.log
.flags
;
952 errdef
.log
.wrapcnt
= errdef_32
.log
.wrapcnt
;
953 errdef
.log
.start_time
= errdef_32
.log
.start_time
;
954 errdef
.log
.stop_time
= errdef_32
.log
.stop_time
;
956 (caddr_t
)(uintptr_t)errdef_32
.log
.logbase
;
957 errdef
.errdef_handle
= errdef_32
.errdef_handle
;
961 if (ddi_copyin((void *)arg
, &errdef
,
962 sizeof (struct bofi_errdef
), mode
))
966 #else /* ! _MULTI_DATAMODEL */
967 if (ddi_copyin((void *)arg
, &errdef
,
968 sizeof (struct bofi_errdef
), mode
) != 0)
970 #endif /* _MULTI_DATAMODEL */
974 if (errdef
.fail_count
== 0)
976 if (errdef
.optype
!= 0) {
977 if (errdef
.access_type
& BOFI_INTR
&&
978 errdef
.optype
!= BOFI_DELAY_INTR
&&
979 errdef
.optype
!= BOFI_LOSE_INTR
&&
980 errdef
.optype
!= BOFI_EXTRA_INTR
)
982 if ((errdef
.access_type
& (BOFI_DMA_RW
|BOFI_PIO_R
)) &&
983 errdef
.optype
== BOFI_NO_TRANSFER
)
985 if ((errdef
.access_type
& (BOFI_PIO_RW
)) &&
986 errdef
.optype
!= BOFI_EQUAL
&&
987 errdef
.optype
!= BOFI_OR
&&
988 errdef
.optype
!= BOFI_XOR
&&
989 errdef
.optype
!= BOFI_AND
&&
990 errdef
.optype
!= BOFI_NO_TRANSFER
)
994 * find softstate for this clone, so we can tag
995 * new errdef on to it
997 softc
= ddi_get_soft_state(statep
, minor
);
1003 if (errdef
.namesize
> NAMESIZE
)
1005 namep
= kmem_zalloc(errdef
.namesize
+1, KM_SLEEP
);
1006 (void) strncpy(namep
, errdef
.name
, errdef
.namesize
);
1008 if (bofi_errdef_alloc(&errdef
, namep
, softc
) != DDI_SUCCESS
) {
1009 (void) bofi_errdef_free((struct bofi_errent
*)
1010 (uintptr_t)errdef
.errdef_handle
);
1011 kmem_free(namep
, errdef
.namesize
+1);
1015 * copy out errdef again, including filled in errdef_handle
1017 #ifdef _MULTI_DATAMODEL
1018 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1019 case DDI_MODEL_ILP32
:
1022 * For use when a 32 bit app makes a call into a
1025 struct bofi_errdef32 errdef_32
;
1027 errdef_32
.namesize
= errdef
.namesize
;
1028 (void) strncpy(errdef_32
.name
, errdef
.name
, NAMESIZE
);
1029 errdef_32
.instance
= errdef
.instance
;
1030 errdef_32
.rnumber
= errdef
.rnumber
;
1031 errdef_32
.offset
= errdef
.offset
;
1032 errdef_32
.len
= errdef
.len
;
1033 errdef_32
.access_type
= errdef
.access_type
;
1034 errdef_32
.access_count
= errdef
.access_count
;
1035 errdef_32
.fail_count
= errdef
.fail_count
;
1036 errdef_32
.acc_chk
= errdef
.acc_chk
;
1037 errdef_32
.optype
= errdef
.optype
;
1038 errdef_32
.operand
= errdef
.operand
;
1039 errdef_32
.log
.logsize
= errdef
.log
.logsize
;
1040 errdef_32
.log
.entries
= errdef
.log
.entries
;
1041 errdef_32
.log
.flags
= errdef
.log
.flags
;
1042 errdef_32
.log
.wrapcnt
= errdef
.log
.wrapcnt
;
1043 errdef_32
.log
.start_time
= errdef
.log
.start_time
;
1044 errdef_32
.log
.stop_time
= errdef
.log
.stop_time
;
1045 errdef_32
.log
.logbase
=
1046 (caddr32_t
)(uintptr_t)errdef
.log
.logbase
;
1047 errdef_32
.errdef_handle
= errdef
.errdef_handle
;
1048 if (ddi_copyout(&errdef_32
, (void *)arg
,
1049 sizeof (struct bofi_errdef32
), mode
) != 0) {
1050 (void) bofi_errdef_free((struct bofi_errent
*)
1051 errdef
.errdef_handle
);
1052 kmem_free(namep
, errdef
.namesize
+1);
1057 case DDI_MODEL_NONE
:
1058 if (ddi_copyout(&errdef
, (void *)arg
,
1059 sizeof (struct bofi_errdef
), mode
) != 0) {
1060 (void) bofi_errdef_free((struct bofi_errent
*)
1061 errdef
.errdef_handle
);
1062 kmem_free(namep
, errdef
.namesize
+1);
1067 #else /* ! _MULTI_DATAMODEL */
1068 if (ddi_copyout(&errdef
, (void *)arg
,
1069 sizeof (struct bofi_errdef
), mode
) != 0) {
1070 (void) bofi_errdef_free((struct bofi_errent
*)
1071 (uintptr_t)errdef
.errdef_handle
);
1072 kmem_free(namep
, errdef
.namesize
+1);
1075 #endif /* _MULTI_DATAMODEL */
1079 * delete existing errdef
1081 if (ddi_copyin((void *)arg
, &ed_handle
,
1082 sizeof (void *), mode
) != 0)
1084 return (bofi_errdef_free((struct bofi_errent
*)ed_handle
));
1087 * start all errdefs corresponding to
1088 * this name and instance
1090 if (ddi_copyin((void *)arg
, &errctl
,
1091 sizeof (struct bofi_errctl
), mode
) != 0)
1096 if (errctl
.namesize
> NAMESIZE
)
1098 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1099 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1100 bofi_start(&errctl
, namep
);
1101 kmem_free(namep
, errctl
.namesize
+1);
1105 * stop all errdefs corresponding to
1106 * this name and instance
1108 if (ddi_copyin((void *)arg
, &errctl
,
1109 sizeof (struct bofi_errctl
), mode
) != 0)
1114 if (errctl
.namesize
> NAMESIZE
)
1116 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1117 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1118 bofi_stop(&errctl
, namep
);
1119 kmem_free(namep
, errctl
.namesize
+1);
1121 case BOFI_BROADCAST
:
1123 * wakeup all errdefs corresponding to
1124 * this name and instance
1126 if (ddi_copyin((void *)arg
, &errctl
,
1127 sizeof (struct bofi_errctl
), mode
) != 0)
1132 if (errctl
.namesize
> NAMESIZE
)
1134 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1135 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1136 bofi_broadcast(&errctl
, namep
);
1137 kmem_free(namep
, errctl
.namesize
+1);
1139 case BOFI_CLEAR_ACC_CHK
:
1141 * clear "acc_chk" for all errdefs corresponding to
1142 * this name and instance
1144 if (ddi_copyin((void *)arg
, &errctl
,
1145 sizeof (struct bofi_errctl
), mode
) != 0)
1150 if (errctl
.namesize
> NAMESIZE
)
1152 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1153 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1154 bofi_clear_acc_chk(&errctl
, namep
);
1155 kmem_free(namep
, errctl
.namesize
+1);
1157 case BOFI_CLEAR_ERRORS
:
1159 * set "fail_count" to 0 for all errdefs corresponding to
1160 * this name and instance whose "access_count"
1163 if (ddi_copyin((void *)arg
, &errctl
,
1164 sizeof (struct bofi_errctl
), mode
) != 0)
1169 if (errctl
.namesize
> NAMESIZE
)
1171 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1172 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1173 bofi_clear_errors(&errctl
, namep
);
1174 kmem_free(namep
, errctl
.namesize
+1);
1176 case BOFI_CLEAR_ERRDEFS
:
1178 * set "access_count" and "fail_count" to 0 for all errdefs
1179 * corresponding to this name and instance
1181 if (ddi_copyin((void *)arg
, &errctl
,
1182 sizeof (struct bofi_errctl
), mode
) != 0)
1187 if (errctl
.namesize
> NAMESIZE
)
1189 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1190 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1191 bofi_clear_errdefs(&errctl
, namep
);
1192 kmem_free(namep
, errctl
.namesize
+1);
1194 case BOFI_CHK_STATE
:
1196 struct acc_log_elem
*klg
;
1199 * get state for this errdef - read in dummy errstate
1200 * with just the errdef_handle filled in
1202 #ifdef _MULTI_DATAMODEL
1203 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1204 case DDI_MODEL_ILP32
:
1207 * For use when a 32 bit app makes a call into a
1210 struct bofi_errstate32 errstate_32
;
1212 if (ddi_copyin((void *)arg
, &errstate_32
,
1213 sizeof (struct bofi_errstate32
), mode
) != 0) {
1216 errstate
.fail_time
= errstate_32
.fail_time
;
1217 errstate
.msg_time
= errstate_32
.msg_time
;
1218 errstate
.access_count
= errstate_32
.access_count
;
1219 errstate
.fail_count
= errstate_32
.fail_count
;
1220 errstate
.acc_chk
= errstate_32
.acc_chk
;
1221 errstate
.errmsg_count
= errstate_32
.errmsg_count
;
1222 (void) strncpy(errstate
.buffer
, errstate_32
.buffer
,
1224 errstate
.severity
= errstate_32
.severity
;
1225 errstate
.log
.logsize
= errstate_32
.log
.logsize
;
1226 errstate
.log
.entries
= errstate_32
.log
.entries
;
1227 errstate
.log
.flags
= errstate_32
.log
.flags
;
1228 errstate
.log
.wrapcnt
= errstate_32
.log
.wrapcnt
;
1229 errstate
.log
.start_time
= errstate_32
.log
.start_time
;
1230 errstate
.log
.stop_time
= errstate_32
.log
.stop_time
;
1231 errstate
.log
.logbase
=
1232 (caddr_t
)(uintptr_t)errstate_32
.log
.logbase
;
1233 errstate
.errdef_handle
= errstate_32
.errdef_handle
;
1236 case DDI_MODEL_NONE
:
1237 if (ddi_copyin((void *)arg
, &errstate
,
1238 sizeof (struct bofi_errstate
), mode
) != 0)
1242 #else /* ! _MULTI_DATAMODEL */
1243 if (ddi_copyin((void *)arg
, &errstate
,
1244 sizeof (struct bofi_errstate
), mode
) != 0)
1246 #endif /* _MULTI_DATAMODEL */
1247 if ((retval
= bofi_errdef_check(&errstate
, &klg
)) == EINVAL
)
1250 * copy out real errstate structure
1252 uls
= errstate
.log
.logsize
;
1253 if (errstate
.log
.entries
> uls
&& uls
)
1254 /* insufficient user memory */
1255 errstate
.log
.entries
= uls
;
1256 /* always pass back a time */
1257 if (errstate
.log
.stop_time
== 0ul)
1258 (void) drv_getparm(TIME
, &(errstate
.log
.stop_time
));
1260 #ifdef _MULTI_DATAMODEL
1261 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1262 case DDI_MODEL_ILP32
:
1265 * For use when a 32 bit app makes a call into a
1268 struct bofi_errstate32 errstate_32
;
1270 errstate_32
.fail_time
= errstate
.fail_time
;
1271 errstate_32
.msg_time
= errstate
.msg_time
;
1272 errstate_32
.access_count
= errstate
.access_count
;
1273 errstate_32
.fail_count
= errstate
.fail_count
;
1274 errstate_32
.acc_chk
= errstate
.acc_chk
;
1275 errstate_32
.errmsg_count
= errstate
.errmsg_count
;
1276 (void) strncpy(errstate_32
.buffer
, errstate
.buffer
,
1278 errstate_32
.severity
= errstate
.severity
;
1279 errstate_32
.log
.logsize
= errstate
.log
.logsize
;
1280 errstate_32
.log
.entries
= errstate
.log
.entries
;
1281 errstate_32
.log
.flags
= errstate
.log
.flags
;
1282 errstate_32
.log
.wrapcnt
= errstate
.log
.wrapcnt
;
1283 errstate_32
.log
.start_time
= errstate
.log
.start_time
;
1284 errstate_32
.log
.stop_time
= errstate
.log
.stop_time
;
1285 errstate_32
.log
.logbase
=
1286 (caddr32_t
)(uintptr_t)errstate
.log
.logbase
;
1287 errstate_32
.errdef_handle
= errstate
.errdef_handle
;
1288 if (ddi_copyout(&errstate_32
, (void *)arg
,
1289 sizeof (struct bofi_errstate32
), mode
) != 0)
1293 case DDI_MODEL_NONE
:
1294 if (ddi_copyout(&errstate
, (void *)arg
,
1295 sizeof (struct bofi_errstate
), mode
) != 0)
1299 #else /* ! _MULTI_DATAMODEL */
1300 if (ddi_copyout(&errstate
, (void *)arg
,
1301 sizeof (struct bofi_errstate
), mode
) != 0)
1303 #endif /* _MULTI_DATAMODEL */
1304 if (uls
&& errstate
.log
.entries
&&
1305 ddi_copyout(klg
, errstate
.log
.logbase
,
1306 errstate
.log
.entries
* sizeof (struct acc_log_elem
),
1312 case BOFI_CHK_STATE_W
:
1314 struct acc_log_elem
*klg
;
1317 * get state for this errdef - read in dummy errstate
1318 * with just the errdef_handle filled in. Then wait for
1319 * a ddi_report_fault message to come back
1321 #ifdef _MULTI_DATAMODEL
1322 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1323 case DDI_MODEL_ILP32
:
1326 * For use when a 32 bit app makes a call into a
1329 struct bofi_errstate32 errstate_32
;
1331 if (ddi_copyin((void *)arg
, &errstate_32
,
1332 sizeof (struct bofi_errstate32
), mode
) != 0) {
1335 errstate
.fail_time
= errstate_32
.fail_time
;
1336 errstate
.msg_time
= errstate_32
.msg_time
;
1337 errstate
.access_count
= errstate_32
.access_count
;
1338 errstate
.fail_count
= errstate_32
.fail_count
;
1339 errstate
.acc_chk
= errstate_32
.acc_chk
;
1340 errstate
.errmsg_count
= errstate_32
.errmsg_count
;
1341 (void) strncpy(errstate
.buffer
, errstate_32
.buffer
,
1343 errstate
.severity
= errstate_32
.severity
;
1344 errstate
.log
.logsize
= errstate_32
.log
.logsize
;
1345 errstate
.log
.entries
= errstate_32
.log
.entries
;
1346 errstate
.log
.flags
= errstate_32
.log
.flags
;
1347 errstate
.log
.wrapcnt
= errstate_32
.log
.wrapcnt
;
1348 errstate
.log
.start_time
= errstate_32
.log
.start_time
;
1349 errstate
.log
.stop_time
= errstate_32
.log
.stop_time
;
1350 errstate
.log
.logbase
=
1351 (caddr_t
)(uintptr_t)errstate_32
.log
.logbase
;
1352 errstate
.errdef_handle
= errstate_32
.errdef_handle
;
1355 case DDI_MODEL_NONE
:
1356 if (ddi_copyin((void *)arg
, &errstate
,
1357 sizeof (struct bofi_errstate
), mode
) != 0)
1361 #else /* ! _MULTI_DATAMODEL */
1362 if (ddi_copyin((void *)arg
, &errstate
,
1363 sizeof (struct bofi_errstate
), mode
) != 0)
1365 #endif /* _MULTI_DATAMODEL */
1366 if ((retval
= bofi_errdef_check_w(&errstate
, &klg
)) == EINVAL
)
1369 * copy out real errstate structure
1371 uls
= errstate
.log
.logsize
;
1372 uls
= errstate
.log
.logsize
;
1373 if (errstate
.log
.entries
> uls
&& uls
)
1374 /* insufficient user memory */
1375 errstate
.log
.entries
= uls
;
1376 /* always pass back a time */
1377 if (errstate
.log
.stop_time
== 0ul)
1378 (void) drv_getparm(TIME
, &(errstate
.log
.stop_time
));
1380 #ifdef _MULTI_DATAMODEL
1381 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1382 case DDI_MODEL_ILP32
:
1385 * For use when a 32 bit app makes a call into a
1388 struct bofi_errstate32 errstate_32
;
1390 errstate_32
.fail_time
= errstate
.fail_time
;
1391 errstate_32
.msg_time
= errstate
.msg_time
;
1392 errstate_32
.access_count
= errstate
.access_count
;
1393 errstate_32
.fail_count
= errstate
.fail_count
;
1394 errstate_32
.acc_chk
= errstate
.acc_chk
;
1395 errstate_32
.errmsg_count
= errstate
.errmsg_count
;
1396 (void) strncpy(errstate_32
.buffer
, errstate
.buffer
,
1398 errstate_32
.severity
= errstate
.severity
;
1399 errstate_32
.log
.logsize
= errstate
.log
.logsize
;
1400 errstate_32
.log
.entries
= errstate
.log
.entries
;
1401 errstate_32
.log
.flags
= errstate
.log
.flags
;
1402 errstate_32
.log
.wrapcnt
= errstate
.log
.wrapcnt
;
1403 errstate_32
.log
.start_time
= errstate
.log
.start_time
;
1404 errstate_32
.log
.stop_time
= errstate
.log
.stop_time
;
1405 errstate_32
.log
.logbase
=
1406 (caddr32_t
)(uintptr_t)errstate
.log
.logbase
;
1407 errstate_32
.errdef_handle
= errstate
.errdef_handle
;
1408 if (ddi_copyout(&errstate_32
, (void *)arg
,
1409 sizeof (struct bofi_errstate32
), mode
) != 0)
1413 case DDI_MODEL_NONE
:
1414 if (ddi_copyout(&errstate
, (void *)arg
,
1415 sizeof (struct bofi_errstate
), mode
) != 0)
1419 #else /* ! _MULTI_DATAMODEL */
1420 if (ddi_copyout(&errstate
, (void *)arg
,
1421 sizeof (struct bofi_errstate
), mode
) != 0)
1423 #endif /* _MULTI_DATAMODEL */
1425 if (uls
&& errstate
.log
.entries
&&
1426 ddi_copyout(klg
, errstate
.log
.logbase
,
1427 errstate
.log
.entries
* sizeof (struct acc_log_elem
),
1433 case BOFI_GET_HANDLES
:
1435 * display existing handles
1437 #ifdef _MULTI_DATAMODEL
1438 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1439 case DDI_MODEL_ILP32
:
1442 * For use when a 32 bit app makes a call into a
1445 struct bofi_get_handles32 get_handles_32
;
1447 if (ddi_copyin((void *)arg
, &get_handles_32
,
1448 sizeof (get_handles_32
), mode
) != 0) {
1451 get_handles
.namesize
= get_handles_32
.namesize
;
1452 (void) strncpy(get_handles
.name
, get_handles_32
.name
,
1454 get_handles
.instance
= get_handles_32
.instance
;
1455 get_handles
.count
= get_handles_32
.count
;
1456 get_handles
.buffer
=
1457 (caddr_t
)(uintptr_t)get_handles_32
.buffer
;
1460 case DDI_MODEL_NONE
:
1461 if (ddi_copyin((void *)arg
, &get_handles
,
1462 sizeof (get_handles
), mode
) != 0)
1466 #else /* ! _MULTI_DATAMODEL */
1467 if (ddi_copyin((void *)arg
, &get_handles
,
1468 sizeof (get_handles
), mode
) != 0)
1470 #endif /* _MULTI_DATAMODEL */
1474 if (get_handles
.namesize
> NAMESIZE
)
1476 namep
= kmem_zalloc(get_handles
.namesize
+1, KM_SLEEP
);
1477 (void) strncpy(namep
, get_handles
.name
, get_handles
.namesize
);
1478 req_count
= get_handles
.count
;
1479 bufptr
= buffer
= kmem_zalloc(req_count
, KM_SLEEP
);
1480 endbuf
= bufptr
+ req_count
;
1482 * display existing handles
1484 mutex_enter(&bofi_low_mutex
);
1485 mutex_enter(&bofi_mutex
);
1486 for (i
= 0; i
< HDL_HASH_TBL_SIZE
; i
++) {
1487 hhashp
= &hhash_table
[i
];
1488 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
) {
1489 if (!driver_under_test(hp
->dip
))
1491 if (ddi_name_to_major(ddi_get_name(hp
->dip
)) !=
1492 ddi_name_to_major(namep
))
1494 if (hp
->instance
!= get_handles
.instance
)
1497 * print information per handle - note that
1498 * DMA* means an unbound DMA handle
1500 (void) snprintf(bufptr
, (size_t)(endbuf
-bufptr
),
1501 " %s %d %s ", hp
->name
, hp
->instance
,
1502 (hp
->type
== BOFI_INT_HDL
) ? "INTR" :
1503 (hp
->type
== BOFI_ACC_HDL
) ? "PIO" :
1504 (hp
->type
== BOFI_DMA_HDL
) ? "DMA" :
1505 (hp
->hparrayp
!= NULL
) ? "DVMA" : "DMA*");
1506 bufptr
+= strlen(bufptr
);
1507 if (hp
->type
== BOFI_ACC_HDL
) {
1508 if (hp
->len
== INT_MAX
- hp
->offset
)
1509 (void) snprintf(bufptr
,
1510 (size_t)(endbuf
-bufptr
),
1511 "reg set %d off 0x%llx\n",
1512 hp
->rnumber
, hp
->offset
);
1514 (void) snprintf(bufptr
,
1515 (size_t)(endbuf
-bufptr
),
1516 "reg set %d off 0x%llx"
1518 hp
->rnumber
, hp
->offset
,
1520 } else if (hp
->type
== BOFI_DMA_HDL
)
1521 (void) snprintf(bufptr
,
1522 (size_t)(endbuf
-bufptr
),
1523 "handle no %d len 0x%llx"
1524 " addr 0x%p\n", hp
->rnumber
,
1525 hp
->len
, (void *)hp
->addr
);
1526 else if (hp
->type
== BOFI_NULL
&&
1527 hp
->hparrayp
== NULL
)
1528 (void) snprintf(bufptr
,
1529 (size_t)(endbuf
-bufptr
),
1530 "handle no %d\n", hp
->rnumber
);
1532 (void) snprintf(bufptr
,
1533 (size_t)(endbuf
-bufptr
), "\n");
1534 bufptr
+= strlen(bufptr
);
1537 mutex_exit(&bofi_mutex
);
1538 mutex_exit(&bofi_low_mutex
);
1539 err
= ddi_copyout(buffer
, get_handles
.buffer
, req_count
, mode
);
1540 kmem_free(namep
, get_handles
.namesize
+1);
1541 kmem_free(buffer
, req_count
);
1546 case BOFI_GET_HANDLE_INFO
:
1548 * display existing handles
1550 #ifdef _MULTI_DATAMODEL
1551 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1552 case DDI_MODEL_ILP32
:
1555 * For use when a 32 bit app makes a call into a
1558 struct bofi_get_hdl_info32 hdl_info_32
;
1560 if (ddi_copyin((void *)arg
, &hdl_info_32
,
1561 sizeof (hdl_info_32
), mode
)) {
1564 hdl_info
.namesize
= hdl_info_32
.namesize
;
1565 (void) strncpy(hdl_info
.name
, hdl_info_32
.name
,
1567 hdl_info
.count
= hdl_info_32
.count
;
1568 hdl_info
.hdli
= (caddr_t
)(uintptr_t)hdl_info_32
.hdli
;
1571 case DDI_MODEL_NONE
:
1572 if (ddi_copyin((void *)arg
, &hdl_info
,
1573 sizeof (hdl_info
), mode
))
1577 #else /* ! _MULTI_DATAMODEL */
1578 if (ddi_copyin((void *)arg
, &hdl_info
,
1579 sizeof (hdl_info
), mode
))
1581 #endif /* _MULTI_DATAMODEL */
1582 if (hdl_info
.namesize
> NAMESIZE
)
1584 namep
= kmem_zalloc(hdl_info
.namesize
+ 1, KM_SLEEP
);
1585 (void) strncpy(namep
, hdl_info
.name
, hdl_info
.namesize
);
1586 req_count
= hdl_info
.count
;
1587 count
= hdl_info
.count
= 0; /* the actual no of handles */
1588 if (req_count
> 0) {
1590 kmem_zalloc(req_count
* sizeof (struct handle_info
),
1594 req_count
= hdl_info
.count
= 0;
1598 * display existing handles
1600 mutex_enter(&bofi_low_mutex
);
1601 mutex_enter(&bofi_mutex
);
1602 for (i
= 0; i
< HDL_HASH_TBL_SIZE
; i
++) {
1603 hhashp
= &hhash_table
[i
];
1604 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
) {
1605 if (!driver_under_test(hp
->dip
) ||
1606 ddi_name_to_major(ddi_get_name(hp
->dip
)) !=
1607 ddi_name_to_major(namep
) ||
1608 ++(hdl_info
.count
) > req_count
||
1612 hdlip
->instance
= hp
->instance
;
1613 hdlip
->rnumber
= hp
->rnumber
;
1616 hdlip
->access_type
= BOFI_PIO_RW
;
1617 hdlip
->offset
= hp
->offset
;
1618 hdlip
->len
= hp
->len
;
1621 hdlip
->access_type
= 0;
1622 if (hp
->flags
& DDI_DMA_WRITE
)
1623 hdlip
->access_type
|=
1625 if (hp
->flags
& DDI_DMA_READ
)
1626 hdlip
->access_type
|=
1628 hdlip
->len
= hp
->len
;
1629 hdlip
->addr_cookie
=
1630 (uint64_t)(uintptr_t)hp
->addr
;
1633 hdlip
->access_type
= BOFI_INTR
;
1636 hdlip
->access_type
= 0;
1643 mutex_exit(&bofi_mutex
);
1644 mutex_exit(&bofi_low_mutex
);
1646 #ifdef _MULTI_DATAMODEL
1647 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1648 case DDI_MODEL_ILP32
:
1651 * For use when a 32 bit app makes a call into a
1654 struct bofi_get_hdl_info32 hdl_info_32
;
1656 hdl_info_32
.namesize
= hdl_info
.namesize
;
1657 (void) strncpy(hdl_info_32
.name
, hdl_info
.name
,
1659 hdl_info_32
.count
= hdl_info
.count
;
1660 hdl_info_32
.hdli
= (caddr32_t
)(uintptr_t)hdl_info
.hdli
;
1661 if (ddi_copyout(&hdl_info_32
, (void *)arg
,
1662 sizeof (hdl_info_32
), mode
) != 0) {
1663 kmem_free(namep
, hdl_info
.namesize
+1);
1666 req_count
* sizeof (*hib
));
1671 case DDI_MODEL_NONE
:
1672 if (ddi_copyout(&hdl_info
, (void *)arg
,
1673 sizeof (hdl_info
), mode
) != 0) {
1674 kmem_free(namep
, hdl_info
.namesize
+1);
1677 req_count
* sizeof (*hib
));
1682 #else /* ! _MULTI_DATAMODEL */
1683 if (ddi_copyout(&hdl_info
, (void *)arg
,
1684 sizeof (hdl_info
), mode
) != 0) {
1685 kmem_free(namep
, hdl_info
.namesize
+1);
1687 kmem_free(hib
, req_count
* sizeof (*hib
));
1690 #endif /* ! _MULTI_DATAMODEL */
1692 if (ddi_copyout(hib
, hdl_info
.hdli
,
1693 count
* sizeof (*hib
), mode
) != 0) {
1694 kmem_free(namep
, hdl_info
.namesize
+1);
1697 req_count
* sizeof (*hib
));
1701 kmem_free(namep
, hdl_info
.namesize
+1);
1703 kmem_free(hib
, req_count
* sizeof (*hib
));
1712 * add a new error definition
1715 bofi_errdef_alloc(struct bofi_errdef
*errdefp
, char *namep
,
1716 struct bofi_errent
*softc
)
1718 struct bofi_errent
*ep
;
1719 struct bofi_shadow
*hp
;
1720 struct bofi_link
*lp
;
1723 * allocate errdef structure and put on in-use list
1725 ep
= kmem_zalloc(sizeof (struct bofi_errent
), KM_SLEEP
);
1726 ep
->errdef
= *errdefp
;
1728 ep
->errdef
.errdef_handle
= (uint64_t)(uintptr_t)ep
;
1729 ep
->errstate
.severity
= DDI_SERVICE_RESTORED
;
1730 ep
->errstate
.errdef_handle
= (uint64_t)(uintptr_t)ep
;
1731 cv_init(&ep
->cv
, NULL
, CV_DRIVER
, NULL
);
1733 * allocate space for logging
1735 ep
->errdef
.log
.entries
= 0;
1736 ep
->errdef
.log
.wrapcnt
= 0;
1737 if (ep
->errdef
.access_type
& BOFI_LOG
)
1738 ep
->logbase
= kmem_alloc(sizeof (struct acc_log_elem
) *
1739 ep
->errdef
.log
.logsize
, KM_SLEEP
);
1743 * put on in-use list
1745 mutex_enter(&bofi_low_mutex
);
1746 mutex_enter(&bofi_mutex
);
1747 ep
->next
= errent_listp
;
1750 * and add it to the per-clone list
1752 ep
->cnext
= softc
->cnext
;
1753 softc
->cnext
->cprev
= ep
;
1758 * look for corresponding shadow handle structures and if we find any
1759 * tag this errdef structure on to their link lists.
1761 for (hp
= shadow_list
.next
; hp
!= &shadow_list
; hp
= hp
->next
) {
1762 if (ddi_name_to_major(hp
->name
) == ddi_name_to_major(namep
) &&
1763 hp
->instance
== errdefp
->instance
&&
1764 (((errdefp
->access_type
& BOFI_DMA_RW
) &&
1765 (ep
->errdef
.rnumber
== -1 ||
1766 hp
->rnumber
== ep
->errdef
.rnumber
) &&
1767 hp
->type
== BOFI_DMA_HDL
&&
1768 (((uintptr_t)(hp
->addr
+ ep
->errdef
.offset
+
1769 ep
->errdef
.len
) & ~LLSZMASK
) >
1770 ((uintptr_t)((hp
->addr
+ ep
->errdef
.offset
) +
1771 LLSZMASK
) & ~LLSZMASK
))) ||
1772 ((errdefp
->access_type
& BOFI_INTR
) &&
1773 hp
->type
== BOFI_INT_HDL
) ||
1774 ((errdefp
->access_type
& BOFI_PIO_RW
) &&
1775 hp
->type
== BOFI_ACC_HDL
&&
1776 (errdefp
->rnumber
== -1 ||
1777 hp
->rnumber
== errdefp
->rnumber
) &&
1778 (errdefp
->len
== 0 ||
1779 hp
->offset
< errdefp
->offset
+ errdefp
->len
) &&
1780 hp
->offset
+ hp
->len
> errdefp
->offset
))) {
1781 lp
= bofi_link_freelist
;
1783 bofi_link_freelist
= lp
->link
;
1785 lp
->link
= hp
->link
;
1790 errdefp
->errdef_handle
= (uint64_t)(uintptr_t)ep
;
1791 mutex_exit(&bofi_mutex
);
1792 mutex_exit(&bofi_low_mutex
);
1793 ep
->softintr_id
= NULL
;
1794 return (ddi_add_softintr(our_dip
, DDI_SOFTINT_MED
, &ep
->softintr_id
,
1795 NULL
, NULL
, bofi_signal
, (caddr_t
)&ep
->errdef
));
1800 * delete existing errdef
1803 bofi_errdef_free(struct bofi_errent
*ep
)
1805 struct bofi_errent
*hep
, *prev_hep
;
1806 struct bofi_link
*lp
, *prev_lp
, *next_lp
;
1807 struct bofi_shadow
*hp
;
1809 mutex_enter(&bofi_low_mutex
);
1810 mutex_enter(&bofi_mutex
);
1812 * don't just assume its a valid ep - check that its on the
1816 for (hep
= errent_listp
; hep
!= NULL
; ) {
1823 mutex_exit(&bofi_mutex
);
1824 mutex_exit(&bofi_low_mutex
);
1828 * found it - delete from in-use list
1832 prev_hep
->next
= hep
->next
;
1834 errent_listp
= hep
->next
;
1836 * and take it off the per-clone list
1838 hep
->cnext
->cprev
= hep
->cprev
;
1839 hep
->cprev
->cnext
= hep
->cnext
;
1841 * see if we are on any shadow handle link lists - and if we
1842 * are then take us off
1844 for (hp
= shadow_list
.next
; hp
!= &shadow_list
; hp
= hp
->next
) {
1846 for (lp
= hp
->link
; lp
!= NULL
; ) {
1847 if (lp
->errentp
== ep
) {
1849 prev_lp
->link
= lp
->link
;
1851 hp
->link
= lp
->link
;
1853 lp
->link
= bofi_link_freelist
;
1854 bofi_link_freelist
= lp
;
1862 mutex_exit(&bofi_mutex
);
1863 mutex_exit(&bofi_low_mutex
);
1865 cv_destroy(&ep
->cv
);
1866 kmem_free(ep
->name
, ep
->errdef
.namesize
+1);
1867 if ((ep
->errdef
.access_type
& BOFI_LOG
) &&
1868 ep
->errdef
.log
.logsize
&& ep
->logbase
) /* double check */
1869 kmem_free(ep
->logbase
,
1870 sizeof (struct acc_log_elem
) * ep
->errdef
.log
.logsize
);
1872 if (ep
->softintr_id
)
1873 ddi_remove_softintr(ep
->softintr_id
);
1874 kmem_free(ep
, sizeof (struct bofi_errent
));
1880 * start all errdefs corresponding to this name and instance
1883 bofi_start(struct bofi_errctl
*errctlp
, char *namep
)
1885 struct bofi_errent
*ep
;
1888 * look for any errdefs with matching name and instance
1890 mutex_enter(&bofi_low_mutex
);
1891 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
1892 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
1893 errctlp
->instance
== ep
->errdef
.instance
) {
1894 ep
->state
|= BOFI_DEV_ACTIVE
;
1895 (void) drv_getparm(TIME
, &(ep
->errdef
.log
.start_time
));
1896 ep
->errdef
.log
.stop_time
= 0ul;
1898 mutex_exit(&bofi_low_mutex
);
1903 * stop all errdefs corresponding to this name and instance
1906 bofi_stop(struct bofi_errctl
*errctlp
, char *namep
)
1908 struct bofi_errent
*ep
;
1911 * look for any errdefs with matching name and instance
1913 mutex_enter(&bofi_low_mutex
);
1914 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
1915 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
1916 errctlp
->instance
== ep
->errdef
.instance
) {
1917 ep
->state
&= ~BOFI_DEV_ACTIVE
;
1918 if (ep
->errdef
.log
.stop_time
== 0ul)
1919 (void) drv_getparm(TIME
,
1920 &(ep
->errdef
.log
.stop_time
));
1922 mutex_exit(&bofi_low_mutex
);
1927 * wake up any thread waiting on this errdefs
1930 bofi_signal(caddr_t arg
)
1932 struct bofi_errdef
*edp
= (struct bofi_errdef
*)arg
;
1933 struct bofi_errent
*hep
;
1934 struct bofi_errent
*ep
=
1935 (struct bofi_errent
*)(uintptr_t)edp
->errdef_handle
;
1937 mutex_enter(&bofi_low_mutex
);
1938 for (hep
= errent_listp
; hep
!= NULL
; ) {
1944 mutex_exit(&bofi_low_mutex
);
1945 return (DDI_INTR_UNCLAIMED
);
1947 if ((ep
->errdef
.access_type
& BOFI_LOG
) &&
1948 (edp
->log
.flags
& BOFI_LOG_FULL
)) {
1949 edp
->log
.stop_time
= bofi_gettime();
1950 ep
->state
|= BOFI_NEW_MESSAGE
;
1951 if (ep
->state
& BOFI_MESSAGE_WAIT
)
1952 cv_broadcast(&ep
->cv
);
1953 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
1955 if (ep
->errstate
.msg_time
!= 0) {
1956 ep
->state
|= BOFI_NEW_MESSAGE
;
1957 if (ep
->state
& BOFI_MESSAGE_WAIT
)
1958 cv_broadcast(&ep
->cv
);
1959 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
1961 mutex_exit(&bofi_low_mutex
);
1962 return (DDI_INTR_CLAIMED
);
1967 * wake up all errdefs corresponding to this name and instance
1970 bofi_broadcast(struct bofi_errctl
*errctlp
, char *namep
)
1972 struct bofi_errent
*ep
;
1975 * look for any errdefs with matching name and instance
1977 mutex_enter(&bofi_low_mutex
);
1978 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
1979 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
1980 errctlp
->instance
== ep
->errdef
.instance
) {
1984 ep
->state
|= BOFI_NEW_MESSAGE
;
1985 if (ep
->state
& BOFI_MESSAGE_WAIT
)
1986 cv_broadcast(&ep
->cv
);
1987 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
1989 mutex_exit(&bofi_low_mutex
);
1994 * clear "acc_chk" for all errdefs corresponding to this name and instance
1998 bofi_clear_acc_chk(struct bofi_errctl
*errctlp
, char *namep
)
2000 struct bofi_errent
*ep
;
2003 * look for any errdefs with matching name and instance
2005 mutex_enter(&bofi_low_mutex
);
2006 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
2007 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
2008 errctlp
->instance
== ep
->errdef
.instance
) {
2009 mutex_enter(&bofi_mutex
);
2010 if (ep
->errdef
.access_count
== 0 &&
2011 ep
->errdef
.fail_count
== 0)
2012 ep
->errdef
.acc_chk
= 0;
2013 mutex_exit(&bofi_mutex
);
2017 ep
->state
|= BOFI_NEW_MESSAGE
;
2018 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2019 cv_broadcast(&ep
->cv
);
2020 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2022 mutex_exit(&bofi_low_mutex
);
2027 * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2028 * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2031 bofi_clear_errors(struct bofi_errctl
*errctlp
, char *namep
)
2033 struct bofi_errent
*ep
;
2036 * look for any errdefs with matching name and instance
2038 mutex_enter(&bofi_low_mutex
);
2039 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
2040 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
2041 errctlp
->instance
== ep
->errdef
.instance
) {
2042 mutex_enter(&bofi_mutex
);
2043 if (ep
->errdef
.access_count
== 0) {
2044 ep
->errdef
.acc_chk
= 0;
2045 ep
->errdef
.fail_count
= 0;
2046 mutex_exit(&bofi_mutex
);
2047 if (ep
->errdef
.log
.stop_time
== 0ul)
2048 (void) drv_getparm(TIME
,
2049 &(ep
->errdef
.log
.stop_time
));
2051 mutex_exit(&bofi_mutex
);
2055 ep
->state
|= BOFI_NEW_MESSAGE
;
2056 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2057 cv_broadcast(&ep
->cv
);
2058 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2060 mutex_exit(&bofi_low_mutex
);
2065 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2066 * this name and instance, set "acc_chk" to 0, and wake them up.
2069 bofi_clear_errdefs(struct bofi_errctl
*errctlp
, char *namep
)
2071 struct bofi_errent
*ep
;
2074 * look for any errdefs with matching name and instance
2076 mutex_enter(&bofi_low_mutex
);
2077 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
2078 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
2079 errctlp
->instance
== ep
->errdef
.instance
) {
2080 mutex_enter(&bofi_mutex
);
2081 ep
->errdef
.acc_chk
= 0;
2082 ep
->errdef
.access_count
= 0;
2083 ep
->errdef
.fail_count
= 0;
2084 mutex_exit(&bofi_mutex
);
2085 if (ep
->errdef
.log
.stop_time
== 0ul)
2086 (void) drv_getparm(TIME
,
2087 &(ep
->errdef
.log
.stop_time
));
2091 ep
->state
|= BOFI_NEW_MESSAGE
;
2092 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2093 cv_broadcast(&ep
->cv
);
2094 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2096 mutex_exit(&bofi_low_mutex
);
2101 * get state for this errdef
2104 bofi_errdef_check(struct bofi_errstate
*errstatep
, struct acc_log_elem
**logpp
)
2106 struct bofi_errent
*hep
;
2107 struct bofi_errent
*ep
;
2109 ep
= (struct bofi_errent
*)(uintptr_t)errstatep
->errdef_handle
;
2110 mutex_enter(&bofi_low_mutex
);
2112 * don't just assume its a valid ep - check that its on the
2115 for (hep
= errent_listp
; hep
!= NULL
; hep
= hep
->next
)
2119 mutex_exit(&bofi_low_mutex
);
2122 mutex_enter(&bofi_mutex
);
2123 ep
->errstate
.access_count
= ep
->errdef
.access_count
;
2124 ep
->errstate
.fail_count
= ep
->errdef
.fail_count
;
2125 ep
->errstate
.acc_chk
= ep
->errdef
.acc_chk
;
2126 ep
->errstate
.log
= ep
->errdef
.log
;
2127 *logpp
= ep
->logbase
;
2128 *errstatep
= ep
->errstate
;
2129 mutex_exit(&bofi_mutex
);
2130 mutex_exit(&bofi_low_mutex
);
2136 * Wait for a ddi_report_fault message to come back for this errdef
2137 * Then return state for this errdef.
2138 * fault report is intercepted by bofi_post_event, which triggers
2139 * bofi_signal via a softint, which will wake up this routine if
2143 bofi_errdef_check_w(struct bofi_errstate
*errstatep
,
2144 struct acc_log_elem
**logpp
)
2146 struct bofi_errent
*hep
;
2147 struct bofi_errent
*ep
;
2150 ep
= (struct bofi_errent
*)(uintptr_t)errstatep
->errdef_handle
;
2151 mutex_enter(&bofi_low_mutex
);
2154 * don't just assume its a valid ep - check that its on the
2157 for (hep
= errent_listp
; hep
!= NULL
; hep
= hep
->next
)
2161 mutex_exit(&bofi_low_mutex
);
2165 * wait for ddi_report_fault for the devinfo corresponding
2168 if (rval
== 0 && !(ep
->state
& BOFI_NEW_MESSAGE
)) {
2169 ep
->state
|= BOFI_MESSAGE_WAIT
;
2170 if (cv_wait_sig(&ep
->cv
, &bofi_low_mutex
) == 0) {
2171 if (!(ep
->state
& BOFI_NEW_MESSAGE
))
2176 ep
->state
&= ~BOFI_NEW_MESSAGE
;
2178 * we either didn't need to sleep, we've been woken up or we've been
2179 * signaled - either way return state now
2181 mutex_enter(&bofi_mutex
);
2182 ep
->errstate
.access_count
= ep
->errdef
.access_count
;
2183 ep
->errstate
.fail_count
= ep
->errdef
.fail_count
;
2184 ep
->errstate
.acc_chk
= ep
->errdef
.acc_chk
;
2185 ep
->errstate
.log
= ep
->errdef
.log
;
2186 *logpp
= ep
->logbase
;
2187 *errstatep
= ep
->errstate
;
2188 mutex_exit(&bofi_mutex
);
2189 mutex_exit(&bofi_low_mutex
);
2195 * support routine - check if requested driver is defined as under test in the
2199 driver_under_test(dev_info_t
*rdip
)
2205 rname
= ddi_get_name(rdip
);
2206 rmaj
= ddi_name_to_major(rname
);
2209 * Enforce the user to specifically request the following drivers.
2211 for (i
= 0; i
< driver_list_size
; i
+= (1 + strlen(&driver_list
[i
]))) {
2212 if (driver_list_neg
== 0) {
2213 if (rmaj
== ddi_name_to_major(&driver_list
[i
]))
2216 if (rmaj
== ddi_name_to_major(&driver_list
[i
+1]))
2220 if (driver_list_neg
== 0)
2229 log_acc_event(struct bofi_errent
*ep
, uint_t at
, offset_t offset
, off_t len
,
2230 size_t repcount
, uint64_t *valuep
)
2232 struct bofi_errdef
*edp
= &(ep
->errdef
);
2233 struct acc_log
*log
= &edp
->log
;
2235 ASSERT(log
!= NULL
);
2236 ASSERT(MUTEX_HELD(&bofi_mutex
));
2238 if (log
->flags
& BOFI_LOG_REPIO
)
2240 else if (repcount
== 0 && edp
->access_count
> 0 &&
2241 (log
->flags
& BOFI_LOG_FULL
) == 0)
2242 edp
->access_count
+= 1;
2244 if (repcount
&& log
->entries
< log
->logsize
) {
2245 struct acc_log_elem
*elem
= ep
->logbase
+ log
->entries
;
2247 if (log
->flags
& BOFI_LOG_TIMESTAMP
)
2248 elem
->access_time
= bofi_gettime();
2249 elem
->access_type
= at
;
2250 elem
->offset
= offset
;
2251 elem
->value
= valuep
? *valuep
: 0ll;
2253 elem
->repcount
= repcount
;
2255 if (log
->entries
== log
->logsize
) {
2256 log
->flags
|= BOFI_LOG_FULL
;
2257 ddi_trigger_softintr(((struct bofi_errent
*)
2258 (uintptr_t)edp
->errdef_handle
)->softintr_id
);
2261 if ((log
->flags
& BOFI_LOG_WRAP
) && edp
->access_count
<= 1) {
2263 edp
->access_count
= log
->logsize
;
2264 log
->entries
= 0; /* wrap back to the start */
2270 * got a condition match on dma read/write - check counts and corrupt
2273 * bofi_mutex always held when this is called.
2276 do_dma_corrupt(struct bofi_shadow
*hp
, struct bofi_errent
*ep
,
2277 uint_t synctype
, off_t off
, off_t length
)
2285 ddi_dma_impl_t
*hdlp
;
2288 ASSERT(MUTEX_HELD(&bofi_mutex
));
2289 if ((ep
->errdef
.access_count
||
2290 ep
->errdef
.fail_count
) &&
2291 (ep
->errdef
.access_type
& BOFI_LOG
)) {
2294 if (synctype
== DDI_DMA_SYNC_FORDEV
)
2296 else if (synctype
== DDI_DMA_SYNC_FORCPU
||
2297 synctype
== DDI_DMA_SYNC_FORKERNEL
)
2301 if ((off
<= ep
->errdef
.offset
&&
2302 off
+ length
> ep
->errdef
.offset
) ||
2303 (off
> ep
->errdef
.offset
&&
2304 off
< ep
->errdef
.offset
+ ep
->errdef
.len
)) {
2305 logaddr
= (caddr_t
)((uintptr_t)(hp
->addr
+
2306 off
+ LLSZMASK
) & ~LLSZMASK
);
2308 log_acc_event(ep
, atype
, logaddr
- hp
->addr
,
2312 if (ep
->errdef
.access_count
> 1) {
2313 ep
->errdef
.access_count
--;
2314 } else if (ep
->errdef
.fail_count
> 0) {
2315 ep
->errdef
.fail_count
--;
2316 ep
->errdef
.access_count
= 0;
2318 * OK do the corruption
2320 if (ep
->errstate
.fail_time
== 0)
2321 ep
->errstate
.fail_time
= bofi_gettime();
2323 * work out how much to corrupt
2325 * Make sure endaddr isn't greater than hp->addr + hp->len.
2326 * If endaddr becomes less than addr len becomes negative
2327 * and the following loop isn't entered.
2329 addr
= (uint64_t *)((uintptr_t)((hp
->addr
+
2330 ep
->errdef
.offset
) + LLSZMASK
) & ~LLSZMASK
);
2331 endaddr
= (uint64_t *)((uintptr_t)(hp
->addr
+ min(hp
->len
,
2332 ep
->errdef
.offset
+ ep
->errdef
.len
)) & ~LLSZMASK
);
2333 len
= endaddr
- addr
;
2334 operand
= ep
->errdef
.operand
;
2335 hdlp
= (ddi_dma_impl_t
*)(hp
->hdl
.dma_handle
);
2336 errp
= &hdlp
->dmai_error
;
2337 if (ep
->errdef
.acc_chk
& 2) {
2339 char buf
[FM_MAX_CLASS
];
2341 errp
->err_status
= DDI_FM_NONFATAL
;
2342 (void) snprintf(buf
, FM_MAX_CLASS
, FM_SIMULATED_DMA
);
2343 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
2344 ddi_fm_ereport_post(hp
->dip
, buf
, ena
,
2345 DDI_NOSLEEP
, FM_VERSION
, DATA_TYPE_UINT8
,
2346 FM_EREPORT_VERS0
, NULL
);
2348 switch (ep
->errdef
.optype
) {
2350 for (i
= 0; i
< len
; i
++)
2351 *(addr
+ i
) = operand
;
2354 for (i
= 0; i
< len
; i
++)
2355 *(addr
+ i
) &= operand
;
2358 for (i
= 0; i
< len
; i
++)
2359 *(addr
+ i
) |= operand
;
2362 for (i
= 0; i
< len
; i
++)
2363 *(addr
+ i
) ^= operand
;
2373 static uint64_t do_bofi_rd8(struct bofi_shadow
*, caddr_t
);
2374 static uint64_t do_bofi_rd16(struct bofi_shadow
*, caddr_t
);
2375 static uint64_t do_bofi_rd32(struct bofi_shadow
*, caddr_t
);
2376 static uint64_t do_bofi_rd64(struct bofi_shadow
*, caddr_t
);
2380 * check all errdefs linked to this shadow handle. If we've got a condition
2381 * match check counts and corrupt data if necessary
2383 * bofi_mutex always held when this is called.
2385 * because of possibility of BOFI_NO_TRANSFER, we couldn't get data
2386 * from io-space before calling this, so we pass in the func to do the
2387 * transfer as a parameter.
2390 do_pior_corrupt(struct bofi_shadow
*hp
, caddr_t addr
,
2391 uint64_t (*func
)(), size_t repcount
, size_t accsize
)
2393 struct bofi_errent
*ep
;
2394 struct bofi_link
*lp
;
2399 uint64_t get_val
, gv
;
2400 ddi_acc_impl_t
*hdlp
;
2403 ASSERT(MUTEX_HELD(&bofi_mutex
));
2405 * check through all errdefs associated with this shadow handle
2407 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
2409 if (ep
->errdef
.len
== 0)
2412 minlen
= min(hp
->len
, ep
->errdef
.len
);
2413 base
= addr
- hp
->addr
- ep
->errdef
.offset
+ hp
->offset
;
2414 if ((ep
->errdef
.access_type
& BOFI_PIO_R
) &&
2415 (ep
->state
& BOFI_DEV_ACTIVE
) &&
2416 base
>= 0 && base
< minlen
) {
2418 * condition match for pio read
2420 if (ep
->errdef
.access_count
> 1) {
2421 ep
->errdef
.access_count
--;
2422 if (done_get
== 0) {
2424 gv
= get_val
= func(hp
, addr
);
2426 if (ep
->errdef
.access_type
& BOFI_LOG
) {
2427 log_acc_event(ep
, BOFI_PIO_R
,
2429 accsize
, repcount
, &gv
);
2431 } else if (ep
->errdef
.fail_count
> 0) {
2432 ep
->errdef
.fail_count
--;
2433 ep
->errdef
.access_count
= 0;
2437 if (ep
->errstate
.fail_time
== 0)
2438 ep
->errstate
.fail_time
= bofi_gettime();
2439 operand
= ep
->errdef
.operand
;
2440 if (done_get
== 0) {
2441 if (ep
->errdef
.optype
==
2444 * no transfer - bomb out
2448 gv
= get_val
= func(hp
, addr
);
2451 if (ep
->errdef
.access_type
& BOFI_LOG
) {
2452 log_acc_event(ep
, BOFI_PIO_R
,
2454 accsize
, repcount
, &gv
);
2456 hdlp
= (ddi_acc_impl_t
*)(hp
->hdl
.acc_handle
);
2457 errp
= hdlp
->ahi_err
;
2458 if (ep
->errdef
.acc_chk
& 1) {
2460 char buf
[FM_MAX_CLASS
];
2462 errp
->err_status
= DDI_FM_NONFATAL
;
2463 (void) snprintf(buf
, FM_MAX_CLASS
,
2465 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
2466 ddi_fm_ereport_post(hp
->dip
, buf
, ena
,
2467 DDI_NOSLEEP
, FM_VERSION
,
2468 DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
2471 switch (ep
->errdef
.optype
) {
2492 return (func(hp
, addr
));
2499 * check all errdefs linked to this shadow handle. If we've got a condition
2500 * match check counts and corrupt data if necessary
2502 * bofi_mutex always held when this is called.
2504 * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
2505 * is to be written out to io-space, 1 otherwise
2508 do_piow_corrupt(struct bofi_shadow
*hp
, caddr_t addr
, uint64_t *valuep
,
2509 size_t size
, size_t repcount
)
2511 struct bofi_errent
*ep
;
2512 struct bofi_link
*lp
;
2515 uint64_t v
= *valuep
;
2516 ddi_acc_impl_t
*hdlp
;
2519 ASSERT(MUTEX_HELD(&bofi_mutex
));
2521 * check through all errdefs associated with this shadow handle
2523 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
2525 if (ep
->errdef
.len
== 0)
2528 minlen
= min(hp
->len
, ep
->errdef
.len
);
2529 base
= (caddr_t
)addr
- hp
->addr
- ep
->errdef
.offset
+hp
->offset
;
2530 if ((ep
->errdef
.access_type
& BOFI_PIO_W
) &&
2531 (ep
->state
& BOFI_DEV_ACTIVE
) &&
2532 base
>= 0 && base
< minlen
) {
2534 * condition match for pio write
2537 if (ep
->errdef
.access_count
> 1) {
2538 ep
->errdef
.access_count
--;
2539 if (ep
->errdef
.access_type
& BOFI_LOG
)
2540 log_acc_event(ep
, BOFI_PIO_W
,
2541 addr
- hp
->addr
, size
,
2543 } else if (ep
->errdef
.fail_count
> 0) {
2544 ep
->errdef
.fail_count
--;
2545 ep
->errdef
.access_count
= 0;
2546 if (ep
->errdef
.access_type
& BOFI_LOG
)
2547 log_acc_event(ep
, BOFI_PIO_W
,
2548 addr
- hp
->addr
, size
,
2553 if (ep
->errstate
.fail_time
== 0)
2554 ep
->errstate
.fail_time
= bofi_gettime();
2555 hdlp
= (ddi_acc_impl_t
*)(hp
->hdl
.acc_handle
);
2556 errp
= hdlp
->ahi_err
;
2557 if (ep
->errdef
.acc_chk
& 1) {
2559 char buf
[FM_MAX_CLASS
];
2561 errp
->err_status
= DDI_FM_NONFATAL
;
2562 (void) snprintf(buf
, FM_MAX_CLASS
,
2564 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
2565 ddi_fm_ereport_post(hp
->dip
, buf
, ena
,
2566 DDI_NOSLEEP
, FM_VERSION
,
2567 DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
2570 switch (ep
->errdef
.optype
) {
2572 *valuep
= ep
->errdef
.operand
;
2575 *valuep
&= ep
->errdef
.operand
;
2578 *valuep
|= ep
->errdef
.operand
;
2581 *valuep
^= ep
->errdef
.operand
;
2583 case BOFI_NO_TRANSFER
:
2585 * no transfer - bomb out
2600 do_bofi_rd8(struct bofi_shadow
*hp
, caddr_t addr
)
2602 return (hp
->save
.acc
.ahi_get8(&hp
->save
.acc
, (uint8_t *)addr
));
/*
 * Common checks for the intercepted ddi_get*() routines: undo the
 * spurious-vaddr offset handed out by bofi_map() when bofi_ddi_check is
 * set, and (optionally) warn or panic on out-of-range accesses, in which
 * case 0 is returned to the caller.
 */
#define	BOFI_READ_CHECKS(type) \
	if (bofi_ddi_check) \
		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
	    (caddr_t)addr - hp->addr >= hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_get() out of range addr %p not in %p/%llx", \
		    (void *)addr, (void *)hp->addr, hp->len); \
		return (0); \
	}
2617 * our getb() routine - use tryenter
2620 bofi_rd8(ddi_acc_impl_t
*handle
, uint8_t *addr
)
2622 struct bofi_shadow
*hp
;
2625 hp
= handle
->ahi_common
.ah_bus_private
;
2626 BOFI_READ_CHECKS(uint8_t)
2627 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
))
2628 return (hp
->save
.acc
.ahi_get8(&hp
->save
.acc
, addr
));
2629 retval
= (uint8_t)do_pior_corrupt(hp
, (caddr_t
)addr
, do_bofi_rd8
, 1,
2631 mutex_exit(&bofi_mutex
);
2637 do_bofi_rd16(struct bofi_shadow
*hp
, caddr_t addr
)
2639 return (hp
->save
.acc
.ahi_get16(&hp
->save
.acc
, (uint16_t *)addr
));
2644 * our getw() routine - use tryenter
2647 bofi_rd16(ddi_acc_impl_t
*handle
, uint16_t *addr
)
2649 struct bofi_shadow
*hp
;
2652 hp
= handle
->ahi_common
.ah_bus_private
;
2653 BOFI_READ_CHECKS(uint16_t)
2654 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
))
2655 return (hp
->save
.acc
.ahi_get16(&hp
->save
.acc
, addr
));
2656 retval
= (uint16_t)do_pior_corrupt(hp
, (caddr_t
)addr
, do_bofi_rd16
, 1,
2658 mutex_exit(&bofi_mutex
);
2664 do_bofi_rd32(struct bofi_shadow
*hp
, caddr_t addr
)
2666 return (hp
->save
.acc
.ahi_get32(&hp
->save
.acc
, (uint32_t *)addr
));
2671 * our getl() routine - use tryenter
2674 bofi_rd32(ddi_acc_impl_t
*handle
, uint32_t *addr
)
2676 struct bofi_shadow
*hp
;
2679 hp
= handle
->ahi_common
.ah_bus_private
;
2680 BOFI_READ_CHECKS(uint32_t)
2681 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
))
2682 return (hp
->save
.acc
.ahi_get32(&hp
->save
.acc
, addr
));
2683 retval
= (uint32_t)do_pior_corrupt(hp
, (caddr_t
)addr
, do_bofi_rd32
, 1,
2685 mutex_exit(&bofi_mutex
);
2691 do_bofi_rd64(struct bofi_shadow
*hp
, caddr_t addr
)
2693 return (hp
->save
.acc
.ahi_get64(&hp
->save
.acc
, (uint64_t *)addr
));
2698 * our getll() routine - use tryenter
2701 bofi_rd64(ddi_acc_impl_t
*handle
, uint64_t *addr
)
2703 struct bofi_shadow
*hp
;
2706 hp
= handle
->ahi_common
.ah_bus_private
;
2707 BOFI_READ_CHECKS(uint64_t)
2708 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
))
2709 return (hp
->save
.acc
.ahi_get64(&hp
->save
.acc
, addr
));
2710 retval
= (uint64_t)do_pior_corrupt(hp
, (caddr_t
)addr
, do_bofi_rd64
, 1,
2712 mutex_exit(&bofi_mutex
);
/*
 * Common checks for the intercepted ddi_put*() routines: undo the
 * spurious-vaddr offset when bofi_ddi_check is set and (optionally)
 * warn or panic on out-of-range accesses, dropping the write.
 */
#define	BOFI_WRITE_TESTS(type) \
	if (bofi_ddi_check) \
		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
	    (caddr_t)addr - hp->addr >= hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_put() out of range addr %p not in %p/%llx\n", \
		    (void *)addr, (void *)hp->addr, hp->len); \
		return; \
	}
2728 * our putb() routine - use tryenter
2731 bofi_wr8(ddi_acc_impl_t
*handle
, uint8_t *addr
, uint8_t value
)
2733 struct bofi_shadow
*hp
;
2734 uint64_t llvalue
= value
;
2736 hp
= handle
->ahi_common
.ah_bus_private
;
2737 BOFI_WRITE_TESTS(uint8_t)
2738 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2739 hp
->save
.acc
.ahi_put8(&hp
->save
.acc
, addr
, (uint8_t)llvalue
);
2742 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 1, 1))
2743 hp
->save
.acc
.ahi_put8(&hp
->save
.acc
, addr
, (uint8_t)llvalue
);
2744 mutex_exit(&bofi_mutex
);
2749 * our putw() routine - use tryenter
2752 bofi_wr16(ddi_acc_impl_t
*handle
, uint16_t *addr
, uint16_t value
)
2754 struct bofi_shadow
*hp
;
2755 uint64_t llvalue
= value
;
2757 hp
= handle
->ahi_common
.ah_bus_private
;
2758 BOFI_WRITE_TESTS(uint16_t)
2759 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2760 hp
->save
.acc
.ahi_put16(&hp
->save
.acc
, addr
, (uint16_t)llvalue
);
2763 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 2, 1))
2764 hp
->save
.acc
.ahi_put16(&hp
->save
.acc
, addr
, (uint16_t)llvalue
);
2765 mutex_exit(&bofi_mutex
);
2770 * our putl() routine - use tryenter
2773 bofi_wr32(ddi_acc_impl_t
*handle
, uint32_t *addr
, uint32_t value
)
2775 struct bofi_shadow
*hp
;
2776 uint64_t llvalue
= value
;
2778 hp
= handle
->ahi_common
.ah_bus_private
;
2779 BOFI_WRITE_TESTS(uint32_t)
2780 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2781 hp
->save
.acc
.ahi_put32(&hp
->save
.acc
, addr
, (uint32_t)llvalue
);
2784 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 4, 1))
2785 hp
->save
.acc
.ahi_put32(&hp
->save
.acc
, addr
, (uint32_t)llvalue
);
2786 mutex_exit(&bofi_mutex
);
2791 * our putll() routine - use tryenter
2794 bofi_wr64(ddi_acc_impl_t
*handle
, uint64_t *addr
, uint64_t value
)
2796 struct bofi_shadow
*hp
;
2797 uint64_t llvalue
= value
;
2799 hp
= handle
->ahi_common
.ah_bus_private
;
2800 BOFI_WRITE_TESTS(uint64_t)
2801 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2802 hp
->save
.acc
.ahi_put64(&hp
->save
.acc
, addr
, (uint64_t)llvalue
);
2805 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 8, 1))
2806 hp
->save
.acc
.ahi_put64(&hp
->save
.acc
, addr
, (uint64_t)llvalue
);
2807 mutex_exit(&bofi_mutex
);
/*
 * Common checks for the intercepted ddi_rep_get*() routines: undo the
 * spurious-vaddr offset when bofi_ddi_check is set; on a range overrun
 * either abandon the transfer (start out of range) or clamp repcount so
 * the transfer stays inside the mapped region.
 */
#define	BOFI_REP_READ_TESTS(type) \
	if (bofi_ddi_check) \
		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
		    (void *)dev_addr, (void *)hp->addr, hp->len); \
		if ((caddr_t)dev_addr < hp->addr || \
		    (caddr_t)dev_addr - hp->addr >= hp->len) \
			return; \
		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
	}
2825 * our rep_getb() routine - use tryenter
2828 bofi_rep_rd8(ddi_acc_impl_t
*handle
, uint8_t *host_addr
, uint8_t *dev_addr
,
2829 size_t repcount
, uint_t flags
)
2831 struct bofi_shadow
*hp
;
2835 hp
= handle
->ahi_common
.ah_bus_private
;
2836 BOFI_REP_READ_TESTS(uint8_t)
2837 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2838 hp
->save
.acc
.ahi_rep_get8(&hp
->save
.acc
, host_addr
, dev_addr
,
2842 for (i
= 0; i
< repcount
; i
++) {
2843 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
2844 *(host_addr
+ i
) = (uint8_t)do_pior_corrupt(hp
, (caddr_t
)addr
,
2845 do_bofi_rd8
, i
? 0 : repcount
, 1);
2847 mutex_exit(&bofi_mutex
);
2852 * our rep_getw() routine - use tryenter
2855 bofi_rep_rd16(ddi_acc_impl_t
*handle
, uint16_t *host_addr
,
2856 uint16_t *dev_addr
, size_t repcount
, uint_t flags
)
2858 struct bofi_shadow
*hp
;
2862 hp
= handle
->ahi_common
.ah_bus_private
;
2863 BOFI_REP_READ_TESTS(uint16_t)
2864 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2865 hp
->save
.acc
.ahi_rep_get16(&hp
->save
.acc
, host_addr
, dev_addr
,
2869 for (i
= 0; i
< repcount
; i
++) {
2870 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
2871 *(host_addr
+ i
) = (uint16_t)do_pior_corrupt(hp
, (caddr_t
)addr
,
2872 do_bofi_rd16
, i
? 0 : repcount
, 2);
2874 mutex_exit(&bofi_mutex
);
2879 * our rep_getl() routine - use tryenter
2882 bofi_rep_rd32(ddi_acc_impl_t
*handle
, uint32_t *host_addr
,
2883 uint32_t *dev_addr
, size_t repcount
, uint_t flags
)
2885 struct bofi_shadow
*hp
;
2889 hp
= handle
->ahi_common
.ah_bus_private
;
2890 BOFI_REP_READ_TESTS(uint32_t)
2891 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2892 hp
->save
.acc
.ahi_rep_get32(&hp
->save
.acc
, host_addr
, dev_addr
,
2896 for (i
= 0; i
< repcount
; i
++) {
2897 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
2898 *(host_addr
+ i
) = (uint32_t)do_pior_corrupt(hp
, (caddr_t
)addr
,
2899 do_bofi_rd32
, i
? 0 : repcount
, 4);
2901 mutex_exit(&bofi_mutex
);
2906 * our rep_getll() routine - use tryenter
2909 bofi_rep_rd64(ddi_acc_impl_t
*handle
, uint64_t *host_addr
,
2910 uint64_t *dev_addr
, size_t repcount
, uint_t flags
)
2912 struct bofi_shadow
*hp
;
2916 hp
= handle
->ahi_common
.ah_bus_private
;
2917 BOFI_REP_READ_TESTS(uint64_t)
2918 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2919 hp
->save
.acc
.ahi_rep_get64(&hp
->save
.acc
, host_addr
, dev_addr
,
2923 for (i
= 0; i
< repcount
; i
++) {
2924 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
2925 *(host_addr
+ i
) = (uint64_t)do_pior_corrupt(hp
, (caddr_t
)addr
,
2926 do_bofi_rd64
, i
? 0 : repcount
, 8);
2928 mutex_exit(&bofi_mutex
);
/*
 * Common checks for the intercepted ddi_rep_put*() routines: undo the
 * spurious-vaddr offset when bofi_ddi_check is set; on a range overrun
 * either abandon the transfer (start out of range) or clamp repcount so
 * the transfer stays inside the mapped region.
 */
#define	BOFI_REP_WRITE_TESTS(type) \
	if (bofi_ddi_check) \
		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
		    (void *)dev_addr, (void *)hp->addr, hp->len); \
		if ((caddr_t)dev_addr < hp->addr || \
		    (caddr_t)dev_addr - hp->addr >= hp->len) \
			return; \
		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
	}
2946 * our rep_putb() routine - use tryenter
2949 bofi_rep_wr8(ddi_acc_impl_t
*handle
, uint8_t *host_addr
, uint8_t *dev_addr
,
2950 size_t repcount
, uint_t flags
)
2952 struct bofi_shadow
*hp
;
2957 hp
= handle
->ahi_common
.ah_bus_private
;
2958 BOFI_REP_WRITE_TESTS(uint8_t)
2959 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2960 hp
->save
.acc
.ahi_rep_put8(&hp
->save
.acc
, host_addr
, dev_addr
,
2964 for (i
= 0; i
< repcount
; i
++) {
2965 llvalue
= *(host_addr
+ i
);
2966 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
2967 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 1, i
? 0 :
2969 hp
->save
.acc
.ahi_put8(&hp
->save
.acc
, addr
,
2972 mutex_exit(&bofi_mutex
);
2977 * our rep_putw() routine - use tryenter
2980 bofi_rep_wr16(ddi_acc_impl_t
*handle
, uint16_t *host_addr
,
2981 uint16_t *dev_addr
, size_t repcount
, uint_t flags
)
2983 struct bofi_shadow
*hp
;
2988 hp
= handle
->ahi_common
.ah_bus_private
;
2989 BOFI_REP_WRITE_TESTS(uint16_t)
2990 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
2991 hp
->save
.acc
.ahi_rep_put16(&hp
->save
.acc
, host_addr
, dev_addr
,
2995 for (i
= 0; i
< repcount
; i
++) {
2996 llvalue
= *(host_addr
+ i
);
2997 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
2998 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 2, i
? 0 :
3000 hp
->save
.acc
.ahi_put16(&hp
->save
.acc
, addr
,
3003 mutex_exit(&bofi_mutex
);
3008 * our rep_putl() routine - use tryenter
3011 bofi_rep_wr32(ddi_acc_impl_t
*handle
, uint32_t *host_addr
,
3012 uint32_t *dev_addr
, size_t repcount
, uint_t flags
)
3014 struct bofi_shadow
*hp
;
3019 hp
= handle
->ahi_common
.ah_bus_private
;
3020 BOFI_REP_WRITE_TESTS(uint32_t)
3021 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
3022 hp
->save
.acc
.ahi_rep_put32(&hp
->save
.acc
, host_addr
, dev_addr
,
3026 for (i
= 0; i
< repcount
; i
++) {
3027 llvalue
= *(host_addr
+ i
);
3028 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
3029 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 4, i
? 0 :
3031 hp
->save
.acc
.ahi_put32(&hp
->save
.acc
, addr
,
3034 mutex_exit(&bofi_mutex
);
3039 * our rep_putll() routine - use tryenter
3042 bofi_rep_wr64(ddi_acc_impl_t
*handle
, uint64_t *host_addr
,
3043 uint64_t *dev_addr
, size_t repcount
, uint_t flags
)
3045 struct bofi_shadow
*hp
;
3050 hp
= handle
->ahi_common
.ah_bus_private
;
3051 BOFI_REP_WRITE_TESTS(uint64_t)
3052 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
3053 hp
->save
.acc
.ahi_rep_put64(&hp
->save
.acc
, host_addr
, dev_addr
,
3057 for (i
= 0; i
< repcount
; i
++) {
3058 llvalue
= *(host_addr
+ i
);
3059 addr
= dev_addr
+ ((flags
== DDI_DEV_AUTOINCR
) ? i
: 0);
3060 if (do_piow_corrupt(hp
, (caddr_t
)addr
, &llvalue
, 8, i
? 0 :
3062 hp
->save
.acc
.ahi_put64(&hp
->save
.acc
, addr
,
3065 mutex_exit(&bofi_mutex
);
3070 * our ddi_map routine
3073 bofi_map(dev_info_t
*dip
, dev_info_t
*rdip
,
3074 ddi_map_req_t
*reqp
, off_t offset
, off_t len
, caddr_t
*vaddrp
)
3077 struct bofi_shadow
*hp
;
3078 struct bofi_errent
*ep
;
3079 struct bofi_link
*lp
, *next_lp
;
3081 struct bofi_shadow
*dhashp
;
3082 struct bofi_shadow
*hhashp
;
3084 switch (reqp
->map_op
) {
3085 case DDI_MO_MAP_LOCKED
:
3087 * for this case get nexus to do real work first
3089 retval
= save_bus_ops
.bus_map(dip
, rdip
, reqp
, offset
, len
,
3091 if (retval
!= DDI_SUCCESS
)
3094 ap
= (ddi_acc_impl_t
*)reqp
->map_handlep
;
3096 return (DDI_SUCCESS
);
3098 * if driver_list is set, only intercept those drivers
3100 if (!driver_under_test(ap
->ahi_common
.ah_dip
))
3101 return (DDI_SUCCESS
);
3104 * support for ddi_regs_map_setup()
3105 * - allocate shadow handle structure and fill it in
3107 hp
= kmem_zalloc(sizeof (struct bofi_shadow
), KM_SLEEP
);
3108 (void) strncpy(hp
->name
, ddi_get_name(ap
->ahi_common
.ah_dip
),
3110 hp
->instance
= ddi_get_instance(ap
->ahi_common
.ah_dip
);
3111 hp
->dip
= ap
->ahi_common
.ah_dip
;
3114 * return spurious value to catch direct access to registers
3117 *vaddrp
= (caddr_t
)64;
3118 hp
->rnumber
= ((ddi_acc_hdl_t
*)ap
)->ah_rnumber
;
3119 hp
->offset
= offset
;
3121 hp
->len
= INT_MAX
- offset
;
3123 hp
->len
= min(len
, INT_MAX
- offset
);
3124 hp
->hdl
.acc_handle
= (ddi_acc_handle_t
)ap
;
3126 hp
->type
= BOFI_ACC_HDL
;
3128 * save existing function pointers and plug in our own
3131 ap
->ahi_get8
= bofi_rd8
;
3132 ap
->ahi_get16
= bofi_rd16
;
3133 ap
->ahi_get32
= bofi_rd32
;
3134 ap
->ahi_get64
= bofi_rd64
;
3135 ap
->ahi_put8
= bofi_wr8
;
3136 ap
->ahi_put16
= bofi_wr16
;
3137 ap
->ahi_put32
= bofi_wr32
;
3138 ap
->ahi_put64
= bofi_wr64
;
3139 ap
->ahi_rep_get8
= bofi_rep_rd8
;
3140 ap
->ahi_rep_get16
= bofi_rep_rd16
;
3141 ap
->ahi_rep_get32
= bofi_rep_rd32
;
3142 ap
->ahi_rep_get64
= bofi_rep_rd64
;
3143 ap
->ahi_rep_put8
= bofi_rep_wr8
;
3144 ap
->ahi_rep_put16
= bofi_rep_wr16
;
3145 ap
->ahi_rep_put32
= bofi_rep_wr32
;
3146 ap
->ahi_rep_put64
= bofi_rep_wr64
;
3147 ap
->ahi_fault_check
= bofi_check_acc_hdl
;
3148 ap
->ahi_acc_attr
&= ~DDI_ACCATTR_DIRECT
;
3150 * stick in a pointer to our shadow handle
3152 ap
->ahi_common
.ah_bus_private
= hp
;
3154 * add to dhash, hhash and inuse lists
3156 mutex_enter(&bofi_low_mutex
);
3157 mutex_enter(&bofi_mutex
);
3158 hp
->next
= shadow_list
.next
;
3159 shadow_list
.next
->prev
= hp
;
3160 hp
->prev
= &shadow_list
;
3161 shadow_list
.next
= hp
;
3162 hhashp
= HDL_HHASH(ap
);
3163 hp
->hnext
= hhashp
->hnext
;
3164 hhashp
->hnext
->hprev
= hp
;
3167 dhashp
= HDL_DHASH(hp
->dip
);
3168 hp
->dnext
= dhashp
->dnext
;
3169 dhashp
->dnext
->dprev
= hp
;
3173 * chain on any pre-existing errdefs that apply to this
3176 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
) {
3177 if (ddi_name_to_major(hp
->name
) ==
3178 ddi_name_to_major(ep
->name
) &&
3179 hp
->instance
== ep
->errdef
.instance
&&
3180 (ep
->errdef
.access_type
& BOFI_PIO_RW
) &&
3181 (ep
->errdef
.rnumber
== -1 ||
3182 hp
->rnumber
== ep
->errdef
.rnumber
) &&
3183 (ep
->errdef
.len
== 0 ||
3184 offset
< ep
->errdef
.offset
+ ep
->errdef
.len
) &&
3185 offset
+ hp
->len
> ep
->errdef
.offset
) {
3186 lp
= bofi_link_freelist
;
3188 bofi_link_freelist
= lp
->link
;
3190 lp
->link
= hp
->link
;
3195 mutex_exit(&bofi_mutex
);
3196 mutex_exit(&bofi_low_mutex
);
3197 return (DDI_SUCCESS
);
3200 ap
= (ddi_acc_impl_t
*)reqp
->map_handlep
;
3204 * support for ddi_regs_map_free()
3205 * - check we really have a shadow handle for this one
3207 mutex_enter(&bofi_low_mutex
);
3208 mutex_enter(&bofi_mutex
);
3209 hhashp
= HDL_HHASH(ap
);
3210 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3211 if (hp
->hdl
.acc_handle
== (ddi_acc_handle_t
)ap
)
3214 mutex_exit(&bofi_mutex
);
3215 mutex_exit(&bofi_low_mutex
);
3219 * got a shadow handle - restore original pointers
3224 * remove from dhash, hhash and inuse lists
3226 hp
->hnext
->hprev
= hp
->hprev
;
3227 hp
->hprev
->hnext
= hp
->hnext
;
3228 hp
->dnext
->dprev
= hp
->dprev
;
3229 hp
->dprev
->dnext
= hp
->dnext
;
3230 hp
->next
->prev
= hp
->prev
;
3231 hp
->prev
->next
= hp
->next
;
3233 * free any errdef link structures tagged onto the shadow handle
3235 for (lp
= hp
->link
; lp
!= NULL
; ) {
3237 lp
->link
= bofi_link_freelist
;
3238 bofi_link_freelist
= lp
;
3242 mutex_exit(&bofi_mutex
);
3243 mutex_exit(&bofi_low_mutex
);
3245 * finally delete shadow handle
3247 kmem_free(hp
, sizeof (struct bofi_shadow
));
3252 return (save_bus_ops
.bus_map(dip
, rdip
, reqp
, offset
, len
, vaddrp
));
3257 * chain any pre-existing errdefs on to newly created dma handle
3258 * if required call do_dma_corrupt() to corrupt data
3261 chain_on_errdefs(struct bofi_shadow
*hp
)
3263 struct bofi_errent
*ep
;
3264 struct bofi_link
*lp
;
3266 ASSERT(MUTEX_HELD(&bofi_mutex
));
3268 * chain on any pre-existing errdefs that apply to this dma_handle
3270 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
) {
3271 if (ddi_name_to_major(hp
->name
) ==
3272 ddi_name_to_major(ep
->name
) &&
3273 hp
->instance
== ep
->errdef
.instance
&&
3274 (ep
->errdef
.rnumber
== -1 ||
3275 hp
->rnumber
== ep
->errdef
.rnumber
) &&
3276 ((ep
->errdef
.access_type
& BOFI_DMA_RW
) &&
3277 (((uintptr_t)(hp
->addr
+ ep
->errdef
.offset
+
3278 ep
->errdef
.len
) & ~LLSZMASK
) >
3279 ((uintptr_t)((hp
->addr
+ ep
->errdef
.offset
) +
3280 LLSZMASK
) & ~LLSZMASK
)))) {
3282 * got a match - link it on
3284 lp
= bofi_link_freelist
;
3286 bofi_link_freelist
= lp
->link
;
3288 lp
->link
= hp
->link
;
3290 if ((ep
->errdef
.access_type
& BOFI_DMA_W
) &&
3291 (hp
->flags
& DDI_DMA_WRITE
) &&
3292 (ep
->state
& BOFI_DEV_ACTIVE
)) {
3293 do_dma_corrupt(hp
, ep
,
3294 DDI_DMA_SYNC_FORDEV
,
3304 * need to do copy byte-by-byte in case one of pages is little-endian
3307 xbcopy(void *from
, void *to
, u_longlong_t len
)
3318 * our ddi_dma_allochdl routine
3321 bofi_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attrp
,
3322 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
3324 int retval
= DDI_DMA_NORESOURCES
;
3325 struct bofi_shadow
*hp
, *xhp
;
3327 struct bofi_shadow
*dhashp
;
3328 struct bofi_shadow
*hhashp
;
3332 * if driver_list is set, only intercept those drivers
3334 if (!driver_under_test(rdip
))
3335 return (save_bus_ops
.bus_dma_allochdl(dip
, rdip
, attrp
,
3336 waitfp
, arg
, handlep
));
3339 * allocate shadow handle structure and fill it in
3341 hp
= kmem_zalloc(sizeof (struct bofi_shadow
),
3342 ((waitfp
== DDI_DMA_SLEEP
) ? KM_SLEEP
: KM_NOSLEEP
));
3345 * what to do here? Wait a bit and try again
3347 if (waitfp
!= DDI_DMA_DONTWAIT
)
3348 (void) timeout((void (*)())waitfp
, arg
, 10);
3351 (void) strncpy(hp
->name
, ddi_get_name(rdip
), NAMESIZE
);
3352 hp
->instance
= ddi_get_instance(rdip
);
3355 hp
->type
= BOFI_NULL
;
3357 * call nexus to do the real work
3359 retval
= save_bus_ops
.bus_dma_allochdl(dip
, rdip
, attrp
, waitfp
, arg
,
3361 if (retval
!= DDI_SUCCESS
) {
3362 kmem_free(hp
, sizeof (struct bofi_shadow
));
3366 * now point set dma_handle to point to real handle
3368 hp
->hdl
.dma_handle
= *handlep
;
3369 mp
= (ddi_dma_impl_t
*)*handlep
;
3370 mp
->dmai_fault_check
= bofi_check_dma_hdl
;
3372 * bind and unbind are cached in devinfo - must overwrite them
3373 * - note that our bind and unbind are quite happy dealing with
3374 * any handles for this devinfo that were previously allocated
3376 if (save_bus_ops
.bus_dma_bindhdl
== DEVI(rdip
)->devi_bus_dma_bindfunc
)
3377 DEVI(rdip
)->devi_bus_dma_bindfunc
= bofi_dma_bindhdl
;
3378 if (save_bus_ops
.bus_dma_unbindhdl
==
3379 DEVI(rdip
)->devi_bus_dma_unbindfunc
)
3380 DEVI(rdip
)->devi_bus_dma_unbindfunc
= bofi_dma_unbindhdl
;
3381 mutex_enter(&bofi_low_mutex
);
3382 mutex_enter(&bofi_mutex
);
3384 * get an "rnumber" for this handle - really just seeking to
3385 * get a unique number - generally only care for early allocated
3386 * handles - so we get as far as INT_MAX, just stay there
3388 dhashp
= HDL_DHASH(hp
->dip
);
3389 for (xhp
= dhashp
->dnext
; xhp
!= dhashp
; xhp
= xhp
->dnext
)
3390 if (ddi_name_to_major(xhp
->name
) ==
3391 ddi_name_to_major(hp
->name
) &&
3392 xhp
->instance
== hp
->instance
&&
3393 (xhp
->type
== BOFI_DMA_HDL
||
3394 xhp
->type
== BOFI_NULL
))
3395 if (xhp
->rnumber
>= maxrnumber
) {
3396 if (xhp
->rnumber
== INT_MAX
)
3397 maxrnumber
= INT_MAX
;
3399 maxrnumber
= xhp
->rnumber
+ 1;
3401 hp
->rnumber
= maxrnumber
;
3403 * add to dhash, hhash and inuse lists
3405 hp
->next
= shadow_list
.next
;
3406 shadow_list
.next
->prev
= hp
;
3407 hp
->prev
= &shadow_list
;
3408 shadow_list
.next
= hp
;
3409 hhashp
= HDL_HHASH(*handlep
);
3410 hp
->hnext
= hhashp
->hnext
;
3411 hhashp
->hnext
->hprev
= hp
;
3414 dhashp
= HDL_DHASH(hp
->dip
);
3415 hp
->dnext
= dhashp
->dnext
;
3416 dhashp
->dnext
->dprev
= hp
;
3419 mutex_exit(&bofi_mutex
);
3420 mutex_exit(&bofi_low_mutex
);
3426 * our ddi_dma_freehdl routine
3429 bofi_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handle
)
3432 struct bofi_shadow
*hp
;
3433 struct bofi_shadow
*hhashp
;
3436 * find shadow for this handle
3438 mutex_enter(&bofi_low_mutex
);
3439 mutex_enter(&bofi_mutex
);
3440 hhashp
= HDL_HHASH(handle
);
3441 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3442 if (hp
->hdl
.dma_handle
== handle
)
3444 mutex_exit(&bofi_mutex
);
3445 mutex_exit(&bofi_low_mutex
);
3447 * call nexus to do the real work
3449 retval
= save_bus_ops
.bus_dma_freehdl(dip
, rdip
, handle
);
3450 if (retval
!= DDI_SUCCESS
) {
3454 * did we really have a shadow for this handle
3459 * yes we have - see if it's still bound
3461 mutex_enter(&bofi_low_mutex
);
3462 mutex_enter(&bofi_mutex
);
3463 if (hp
->type
!= BOFI_NULL
)
3464 panic("driver freeing bound dma_handle");
3466 * remove from dhash, hhash and inuse lists
3468 hp
->hnext
->hprev
= hp
->hprev
;
3469 hp
->hprev
->hnext
= hp
->hnext
;
3470 hp
->dnext
->dprev
= hp
->dprev
;
3471 hp
->dprev
->dnext
= hp
->dnext
;
3472 hp
->next
->prev
= hp
->prev
;
3473 hp
->prev
->next
= hp
->next
;
3474 mutex_exit(&bofi_mutex
);
3475 mutex_exit(&bofi_low_mutex
);
3477 kmem_free(hp
, sizeof (struct bofi_shadow
));
3483 * our ddi_dma_bindhdl routine
3486 bofi_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
3487 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareqp
,
3488 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
3490 int retval
= DDI_DMA_NORESOURCES
;
3491 auto struct ddi_dma_req dmareq
;
3492 struct bofi_shadow
*hp
;
3493 struct bofi_shadow
*hhashp
;
3495 unsigned long pagemask
= ddi_ptob(rdip
, 1) - 1;
3498 * check we really have a shadow for this handle
3500 mutex_enter(&bofi_low_mutex
);
3501 mutex_enter(&bofi_mutex
);
3502 hhashp
= HDL_HHASH(handle
);
3503 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3504 if (hp
->hdl
.dma_handle
== handle
)
3506 mutex_exit(&bofi_mutex
);
3507 mutex_exit(&bofi_low_mutex
);
3510 * no we don't - just call nexus to do the real work
3512 return save_bus_ops
.bus_dma_bindhdl(dip
, rdip
, handle
, dmareqp
,
3516 * yes we have - see if it's already bound
3518 if (hp
->type
!= BOFI_NULL
)
3519 return (DDI_DMA_INUSE
);
3521 hp
->flags
= dmareqp
->dmar_flags
;
3522 if (dmareqp
->dmar_object
.dmao_type
== DMA_OTYP_PAGES
) {
3523 hp
->map_flags
= B_PAGEIO
;
3524 hp
->map_pp
= dmareqp
->dmar_object
.dmao_obj
.pp_obj
.pp_pp
;
3525 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_priv
!= NULL
) {
3526 hp
->map_flags
= B_SHADOW
;
3527 hp
->map_pplist
= dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_priv
;
3532 * get a kernel virtual mapping
3534 hp
->addr
= ddi_dmareq_mapin(dmareqp
, &hp
->mapaddr
, &hp
->len
);
3535 if (hp
->addr
== NULL
)
3537 if (bofi_sync_check
) {
3539 * Take a copy and pass pointers to this up to nexus instead.
3540 * Data will be copied from the original on explicit
3541 * and implicit ddi_dma_sync()
3543 * - maintain page alignment because some devices assume it.
3545 hp
->origaddr
= hp
->addr
;
3546 hp
->allocaddr
= ddi_umem_alloc(
3547 ((uintptr_t)hp
->addr
& pagemask
) + hp
->len
,
3548 (dmareqp
->dmar_fp
== DDI_DMA_SLEEP
) ? KM_SLEEP
: KM_NOSLEEP
,
3550 if (hp
->allocaddr
== NULL
)
3552 hp
->addr
= hp
->allocaddr
+ ((uintptr_t)hp
->addr
& pagemask
);
3553 if (dmareqp
->dmar_flags
& DDI_DMA_WRITE
)
3554 xbcopy(hp
->origaddr
, hp
->addr
, hp
->len
);
3556 dmareq
.dmar_object
.dmao_size
= hp
->len
;
3557 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_VADDR
;
3558 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
= &kas
;
3559 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_addr
= hp
->addr
;
3560 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
= NULL
;
3564 * call nexus to do the real work
3566 retval
= save_bus_ops
.bus_dma_bindhdl(dip
, rdip
, handle
, dmareqp
,
3568 if (retval
!= DDI_SUCCESS
)
3573 mp
= (ddi_dma_impl_t
*)handle
;
3574 mp
->dmai_rflags
&= ~DMP_NOSYNC
;
3576 * chain on any pre-existing errdefs that apply to this
3577 * acc_handle and corrupt if required (as there is an implicit
3578 * ddi_dma_sync() in this call)
3580 mutex_enter(&bofi_low_mutex
);
3581 mutex_enter(&bofi_mutex
);
3582 hp
->type
= BOFI_DMA_HDL
;
3583 chain_on_errdefs(hp
);
3584 mutex_exit(&bofi_mutex
);
3585 mutex_exit(&bofi_low_mutex
);
3589 if (dmareqp
->dmar_fp
!= DDI_DMA_DONTWAIT
) {
3591 * what to do here? Wait a bit and try again
3593 (void) timeout((void (*)())dmareqp
->dmar_fp
,
3594 dmareqp
->dmar_arg
, 10);
3598 ddi_dmareq_mapout(hp
->mapaddr
, hp
->len
, hp
->map_flags
,
3599 hp
->map_pp
, hp
->map_pplist
);
3600 if (bofi_sync_check
&& hp
->allocaddr
)
3601 ddi_umem_free(hp
->umem_cookie
);
3603 hp
->allocaddr
= NULL
;
3604 hp
->origaddr
= NULL
;
3611 * our ddi_dma_unbindhdl routine
3614 bofi_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handle
)
3616 struct bofi_link
*lp
, *next_lp
;
3617 struct bofi_errent
*ep
;
3619 struct bofi_shadow
*hp
;
3620 struct bofi_shadow
*hhashp
;
3623 * call nexus to do the real work
3625 retval
= save_bus_ops
.bus_dma_unbindhdl(dip
, rdip
, handle
);
3626 if (retval
!= DDI_SUCCESS
)
3629 * check we really have a shadow for this handle
3631 mutex_enter(&bofi_low_mutex
);
3632 mutex_enter(&bofi_mutex
);
3633 hhashp
= HDL_HHASH(handle
);
3634 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3635 if (hp
->hdl
.dma_handle
== handle
)
3638 mutex_exit(&bofi_mutex
);
3639 mutex_exit(&bofi_low_mutex
);
3643 * yes we have - see if it's already unbound
3645 if (hp
->type
== BOFI_NULL
)
3646 panic("driver unbinding unbound dma_handle");
3648 * free any errdef link structures tagged on to this
3651 for (lp
= hp
->link
; lp
!= NULL
; ) {
3654 * there is an implicit sync_for_cpu on free -
3655 * may need to corrupt
3658 if ((ep
->errdef
.access_type
& BOFI_DMA_R
) &&
3659 (hp
->flags
& DDI_DMA_READ
) &&
3660 (ep
->state
& BOFI_DEV_ACTIVE
)) {
3661 do_dma_corrupt(hp
, ep
, DDI_DMA_SYNC_FORCPU
, 0, hp
->len
);
3663 lp
->link
= bofi_link_freelist
;
3664 bofi_link_freelist
= lp
;
3668 hp
->type
= BOFI_NULL
;
3669 mutex_exit(&bofi_mutex
);
3670 mutex_exit(&bofi_low_mutex
);
3672 if (bofi_sync_check
&& (hp
->flags
& DDI_DMA_READ
))
3674 * implicit sync_for_cpu - copy data back
3677 xbcopy(hp
->addr
, hp
->origaddr
, hp
->len
);
3678 ddi_dmareq_mapout(hp
->mapaddr
, hp
->len
, hp
->map_flags
,
3679 hp
->map_pp
, hp
->map_pplist
);
3680 if (bofi_sync_check
&& hp
->allocaddr
)
3681 ddi_umem_free(hp
->umem_cookie
);
3683 hp
->allocaddr
= NULL
;
3684 hp
->origaddr
= NULL
;
3690 * our ddi_dma_sync routine
3693 bofi_dma_flush(dev_info_t
*dip
, dev_info_t
*rdip
,
3694 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t flags
)
3696 struct bofi_link
*lp
;
3697 struct bofi_errent
*ep
;
3698 struct bofi_shadow
*hp
;
3699 struct bofi_shadow
*hhashp
;
3702 if (flags
== DDI_DMA_SYNC_FORCPU
|| flags
== DDI_DMA_SYNC_FORKERNEL
) {
3704 * in this case get nexus driver to do sync first
3706 retval
= save_bus_ops
.bus_dma_flush(dip
, rdip
, handle
, off
,
3708 if (retval
!= DDI_SUCCESS
)
3712 * check we really have a shadow for this handle
3714 mutex_enter(&bofi_low_mutex
);
3715 mutex_enter(&bofi_mutex
);
3716 hhashp
= HDL_HHASH(handle
);
3717 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3718 if (hp
->hdl
.dma_handle
== handle
&&
3719 hp
->type
== BOFI_DMA_HDL
)
3721 mutex_exit(&bofi_mutex
);
3722 mutex_exit(&bofi_low_mutex
);
3725 * yes - do we need to copy data from original
3727 if (bofi_sync_check
&& flags
== DDI_DMA_SYNC_FORDEV
)
3729 xbcopy(hp
->origaddr
+off
, hp
->addr
+off
,
3730 len
? len
: (hp
->len
- off
));
3732 * yes - check if we need to corrupt the data
3734 mutex_enter(&bofi_low_mutex
);
3735 mutex_enter(&bofi_mutex
);
3736 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
3738 if ((((ep
->errdef
.access_type
& BOFI_DMA_R
) &&
3739 (flags
== DDI_DMA_SYNC_FORCPU
||
3740 flags
== DDI_DMA_SYNC_FORKERNEL
)) ||
3741 ((ep
->errdef
.access_type
& BOFI_DMA_W
) &&
3742 (flags
== DDI_DMA_SYNC_FORDEV
))) &&
3743 (ep
->state
& BOFI_DEV_ACTIVE
)) {
3744 do_dma_corrupt(hp
, ep
, flags
, off
,
3745 len
? len
: (hp
->len
- off
));
3748 mutex_exit(&bofi_mutex
);
3749 mutex_exit(&bofi_low_mutex
);
3751 * do we need to copy data to original
3753 if (bofi_sync_check
&& (flags
== DDI_DMA_SYNC_FORCPU
||
3754 flags
== DDI_DMA_SYNC_FORKERNEL
))
3756 xbcopy(hp
->addr
+off
, hp
->origaddr
+off
,
3757 len
? len
: (hp
->len
- off
));
3759 if (flags
== DDI_DMA_SYNC_FORDEV
)
3761 * in this case get nexus driver to do sync last
3763 retval
= save_bus_ops
.bus_dma_flush(dip
, rdip
, handle
, off
,
3770 * our dma_win routine
3773 bofi_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
3774 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
,
3775 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
3777 struct bofi_shadow
*hp
;
3778 struct bofi_shadow
*hhashp
;
3783 * call nexus to do the real work
3785 retval
= save_bus_ops
.bus_dma_win(dip
, rdip
, handle
, win
, offp
, lenp
,
3787 if (retval
!= DDI_SUCCESS
)
3790 * check we really have a shadow for this handle
3792 mutex_enter(&bofi_low_mutex
);
3793 mutex_enter(&bofi_mutex
);
3794 hhashp
= HDL_HHASH(handle
);
3795 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3796 if (hp
->hdl
.dma_handle
== handle
)
3800 * yes - make sure DMP_NOSYNC is unset
3802 mp
= (ddi_dma_impl_t
*)handle
;
3803 mp
->dmai_rflags
&= ~DMP_NOSYNC
;
3805 mutex_exit(&bofi_mutex
);
3806 mutex_exit(&bofi_low_mutex
);
3812 * our dma_ctl routine
3815 bofi_dma_ctl(dev_info_t
*dip
, dev_info_t
*rdip
,
3816 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
3817 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t flags
)
3819 struct bofi_shadow
*hp
;
3820 struct bofi_shadow
*hhashp
;
3823 struct bofi_shadow
*dummyhp
;
3826 * get nexus to do real work
3828 retval
= save_bus_ops
.bus_dma_ctl(dip
, rdip
, handle
, request
, offp
,
3830 if (retval
!= DDI_SUCCESS
)
3833 * if driver_list is set, only intercept those drivers
3835 if (!driver_under_test(rdip
))
3836 return (DDI_SUCCESS
);
3839 * check we really have a shadow for this handle
3841 mutex_enter(&bofi_low_mutex
);
3842 mutex_enter(&bofi_mutex
);
3843 hhashp
= HDL_HHASH(handle
);
3844 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
3845 if (hp
->hdl
.dma_handle
== handle
)
3848 mutex_exit(&bofi_mutex
);
3849 mutex_exit(&bofi_low_mutex
);
3853 * yes we have - see what kind of command this is
3856 case DDI_DMA_RELEASE
:
3858 * dvma release - release dummy handle and all the index handles
3861 dummyhp
->hnext
->hprev
= dummyhp
->hprev
;
3862 dummyhp
->hprev
->hnext
= dummyhp
->hnext
;
3863 mutex_exit(&bofi_mutex
);
3864 mutex_exit(&bofi_low_mutex
);
3865 for (i
= 0; i
< dummyhp
->len
; i
++) {
3866 hp
= dummyhp
->hparrayp
[i
];
3868 * chek none of the index handles were still loaded
3870 if (hp
->type
!= BOFI_NULL
)
3871 panic("driver releasing loaded dvma");
3873 * remove from dhash and inuse lists
3875 mutex_enter(&bofi_low_mutex
);
3876 mutex_enter(&bofi_mutex
);
3877 hp
->dnext
->dprev
= hp
->dprev
;
3878 hp
->dprev
->dnext
= hp
->dnext
;
3879 hp
->next
->prev
= hp
->prev
;
3880 hp
->prev
->next
= hp
->next
;
3881 mutex_exit(&bofi_mutex
);
3882 mutex_exit(&bofi_low_mutex
);
3884 if (bofi_sync_check
&& hp
->allocaddr
)
3885 ddi_umem_free(hp
->umem_cookie
);
3886 kmem_free(hp
, sizeof (struct bofi_shadow
));
3888 kmem_free(dummyhp
->hparrayp
, dummyhp
->len
*
3889 sizeof (struct bofi_shadow
*));
3890 kmem_free(dummyhp
, sizeof (struct bofi_shadow
));
3895 mutex_exit(&bofi_mutex
);
3896 mutex_exit(&bofi_low_mutex
);
3901 * bofi intercept routine - gets called instead of users interrupt routine
3904 bofi_intercept_intr(caddr_t xp
, caddr_t arg2
)
3906 struct bofi_errent
*ep
;
3907 struct bofi_link
*lp
;
3908 struct bofi_shadow
*hp
;
3911 uint_t retval
= DDI_INTR_UNCLAIMED
;
3913 int unclaimed_counter
= 0;
3914 int jabber_detected
= 0;
3916 hp
= (struct bofi_shadow
*)xp
;
3918 * check if nothing to do
3920 if (hp
->link
== NULL
)
3921 return (hp
->save
.intr
.int_handler
3922 (hp
->save
.intr
.int_handler_arg1
, arg2
));
3923 mutex_enter(&bofi_mutex
);
3925 * look for any errdefs
3927 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
3929 if (ep
->state
& BOFI_DEV_ACTIVE
) {
3933 if ((ep
->errdef
.access_count
||
3934 ep
->errdef
.fail_count
) &&
3935 (ep
->errdef
.access_type
& BOFI_LOG
))
3936 log_acc_event(ep
, BOFI_INTR
, 0, 0, 1, 0);
3937 if (ep
->errdef
.access_count
> 1) {
3938 ep
->errdef
.access_count
--;
3939 } else if (ep
->errdef
.fail_count
> 0) {
3940 ep
->errdef
.fail_count
--;
3941 ep
->errdef
.access_count
= 0;
3943 * OK do "corruption"
3945 if (ep
->errstate
.fail_time
== 0)
3946 ep
->errstate
.fail_time
= bofi_gettime();
3947 switch (ep
->errdef
.optype
) {
3948 case BOFI_DELAY_INTR
:
3951 (ep
->errdef
.operand
);
3954 case BOFI_LOSE_INTR
:
3957 case BOFI_EXTRA_INTR
:
3958 intr_count
+= ep
->errdef
.operand
;
3966 mutex_exit(&bofi_mutex
);
3968 * send extra or fewer interrupts as requested
3970 for (i
= 0; i
< intr_count
; i
++) {
3971 result
= hp
->save
.intr
.int_handler
3972 (hp
->save
.intr
.int_handler_arg1
, arg2
);
3973 if (result
== DDI_INTR_CLAIMED
)
3974 unclaimed_counter
>>= 1;
3975 else if (++unclaimed_counter
>= 20)
3976 jabber_detected
= 1;
3981 * if more than 1000 spurious interrupts requested and
3982 * jabber not detected - give warning
3984 if (intr_count
> 1000 && !jabber_detected
)
3985 panic("undetected interrupt jabber: %s%d",
3986 hp
->name
, hp
->instance
);
3988 * return first response - or "unclaimed" if none
3995 * our ddi_check_acc_hdl
3999 bofi_check_acc_hdl(ddi_acc_impl_t
*handle
)
4001 struct bofi_shadow
*hp
;
4002 struct bofi_link
*lp
;
4005 hp
= handle
->ahi_common
.ah_bus_private
;
4006 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
4009 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4011 * OR in error state from all associated
4014 if (lp
->errentp
->errdef
.access_count
== 0 &&
4015 (lp
->errentp
->state
& BOFI_DEV_ACTIVE
)) {
4016 result
= (lp
->errentp
->errdef
.acc_chk
& 1);
4019 mutex_exit(&bofi_mutex
);
4024 * our ddi_check_dma_hdl
4028 bofi_check_dma_hdl(ddi_dma_impl_t
*handle
)
4030 struct bofi_shadow
*hp
;
4031 struct bofi_link
*lp
;
4032 struct bofi_shadow
*hhashp
;
4035 if (!mutex_tryenter(&bofi_mutex
)) {
4038 hhashp
= HDL_HHASH(handle
);
4039 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
4040 if (hp
->hdl
.dma_handle
== (ddi_dma_handle_t
)handle
)
4043 mutex_exit(&bofi_mutex
);
4047 mutex_exit(&bofi_mutex
);
4050 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4052 * OR in error state from all associated
4055 if (lp
->errentp
->errdef
.access_count
== 0 &&
4056 (lp
->errentp
->state
& BOFI_DEV_ACTIVE
)) {
4057 result
= ((lp
->errentp
->errdef
.acc_chk
& 2) ? 1 : 0);
4060 mutex_exit(&bofi_mutex
);
4067 bofi_post_event(dev_info_t
*dip
, dev_info_t
*rdip
,
4068 ddi_eventcookie_t eventhdl
, void *impl_data
)
4070 ddi_eventcookie_t ec
;
4071 struct ddi_fault_event_data
*arg
;
4072 struct bofi_errent
*ep
;
4073 struct bofi_shadow
*hp
;
4074 struct bofi_shadow
*dhashp
;
4075 struct bofi_link
*lp
;
4078 if (ddi_get_eventcookie(dip
, DDI_DEVI_FAULT_EVENT
, &ec
) != DDI_SUCCESS
)
4079 return (DDI_FAILURE
);
4082 return (save_bus_ops
.bus_post_event(dip
, rdip
, eventhdl
,
4085 arg
= (struct ddi_fault_event_data
*)impl_data
;
4086 mutex_enter(&bofi_mutex
);
4088 * find shadow handles with appropriate dev_infos
4089 * and set error reported on all associated errdef structures
4091 dhashp
= HDL_DHASH(arg
->f_dip
);
4092 for (hp
= dhashp
->dnext
; hp
!= dhashp
; hp
= hp
->dnext
) {
4093 if (hp
->dip
== arg
->f_dip
) {
4094 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4096 ep
->errstate
.errmsg_count
++;
4097 if ((ep
->errstate
.msg_time
== 0 ||
4098 ep
->errstate
.severity
> arg
->f_impact
) &&
4099 (ep
->state
& BOFI_DEV_ACTIVE
)) {
4100 ep
->errstate
.msg_time
= bofi_gettime();
4101 ep
->errstate
.severity
= arg
->f_impact
;
4102 (void) strncpy(ep
->errstate
.buffer
,
4103 arg
->f_message
, ERRMSGSIZE
);
4104 ddi_trigger_softintr(ep
->softintr_id
);
4109 mutex_exit(&bofi_mutex
);
4110 return (save_bus_ops
.bus_post_event(dip
, rdip
, eventhdl
, impl_data
));
4115 bofi_fm_ereport_callback(sysevent_t
*ev
, void *cookie
)
4122 ddi_fault_impact_t impact
;
4123 struct bofi_errent
*ep
;
4124 struct bofi_shadow
*hp
;
4125 struct bofi_link
*lp
;
4126 char service_class
[FM_MAX_CLASS
];
4127 char hppath
[MAXPATHLEN
];
4128 int service_ereport
= 0;
4130 (void) sysevent_get_attr_list(ev
, &nvlist
);
4131 (void) nvlist_lookup_string(nvlist
, FM_CLASS
, &class);
4132 if (nvlist_lookup_nvlist(nvlist
, FM_EREPORT_DETECTOR
, &detector
) == 0)
4133 (void) nvlist_lookup_string(detector
, FM_FMRI_DEV_PATH
, &path
);
4135 (void) snprintf(service_class
, FM_MAX_CLASS
, "%s.%s.%s.",
4136 FM_EREPORT_CLASS
, DDI_IO_CLASS
, DDI_FM_SERVICE_IMPACT
);
4137 if (strncmp(class, service_class
, strlen(service_class
) - 1) == 0)
4138 service_ereport
= 1;
4140 mutex_enter(&bofi_mutex
);
4142 * find shadow handles with appropriate dev_infos
4143 * and set error reported on all associated errdef structures
4145 for (hp
= shadow_list
.next
; hp
!= &shadow_list
; hp
= hp
->next
) {
4146 (void) ddi_pathname(hp
->dip
, hppath
);
4147 if (strcmp(path
, hppath
) != 0)
4149 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4151 ep
->errstate
.errmsg_count
++;
4152 if (!(ep
->state
& BOFI_DEV_ACTIVE
))
4154 if (ep
->errstate
.msg_time
!= 0)
4156 if (service_ereport
) {
4157 ptr
= class + strlen(service_class
);
4158 if (strcmp(ptr
, DDI_FM_SERVICE_LOST
) == 0)
4159 impact
= DDI_SERVICE_LOST
;
4160 else if (strcmp(ptr
,
4161 DDI_FM_SERVICE_DEGRADED
) == 0)
4162 impact
= DDI_SERVICE_DEGRADED
;
4163 else if (strcmp(ptr
,
4164 DDI_FM_SERVICE_RESTORED
) == 0)
4165 impact
= DDI_SERVICE_RESTORED
;
4167 impact
= DDI_SERVICE_UNAFFECTED
;
4168 if (ep
->errstate
.severity
> impact
)
4169 ep
->errstate
.severity
= impact
;
4170 } else if (ep
->errstate
.buffer
[0] == '\0') {
4171 (void) strncpy(ep
->errstate
.buffer
, class,
4174 if (ep
->errstate
.buffer
[0] != '\0' &&
4175 ep
->errstate
.severity
< DDI_SERVICE_RESTORED
) {
4176 ep
->errstate
.msg_time
= bofi_gettime();
4177 ddi_trigger_softintr(ep
->softintr_id
);
4181 nvlist_free(nvlist
);
4182 mutex_exit(&bofi_mutex
);
4187 * our intr_ops routine
4190 bofi_intr_ops(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_intr_op_t intr_op
,
4191 ddi_intr_handle_impl_t
*hdlp
, void *result
)
4194 struct bofi_shadow
*hp
;
4195 struct bofi_shadow
*dhashp
;
4196 struct bofi_shadow
*hhashp
;
4197 struct bofi_errent
*ep
;
4198 struct bofi_link
*lp
, *next_lp
;
4201 case DDI_INTROP_ADDISR
:
4203 * if driver_list is set, only intercept those drivers
4205 if (!driver_under_test(rdip
))
4206 return (save_bus_ops
.bus_intr_op(dip
, rdip
,
4207 intr_op
, hdlp
, result
));
4209 * allocate shadow handle structure and fill in
4211 hp
= kmem_zalloc(sizeof (struct bofi_shadow
), KM_SLEEP
);
4212 (void) strncpy(hp
->name
, ddi_get_name(rdip
), NAMESIZE
);
4213 hp
->instance
= ddi_get_instance(rdip
);
4214 hp
->save
.intr
.int_handler
= hdlp
->ih_cb_func
;
4215 hp
->save
.intr
.int_handler_arg1
= hdlp
->ih_cb_arg1
;
4216 hdlp
->ih_cb_func
= (ddi_intr_handler_t
*)bofi_intercept_intr
;
4217 hdlp
->ih_cb_arg1
= (caddr_t
)hp
;
4218 hp
->bofi_inum
= hdlp
->ih_inum
;
4221 hp
->type
= BOFI_INT_HDL
;
4223 * save whether hilevel or not
4226 if (hdlp
->ih_pri
>= ddi_intr_get_hilevel_pri())
4232 * call nexus to do real work, but specifying our handler, and
4233 * our shadow handle as argument
4235 retval
= save_bus_ops
.bus_intr_op(dip
, rdip
,
4236 intr_op
, hdlp
, result
);
4237 if (retval
!= DDI_SUCCESS
) {
4238 kmem_free(hp
, sizeof (struct bofi_shadow
));
4242 * add to dhash, hhash and inuse lists
4244 mutex_enter(&bofi_low_mutex
);
4245 mutex_enter(&bofi_mutex
);
4246 hp
->next
= shadow_list
.next
;
4247 shadow_list
.next
->prev
= hp
;
4248 hp
->prev
= &shadow_list
;
4249 shadow_list
.next
= hp
;
4250 hhashp
= HDL_HHASH(hdlp
->ih_inum
);
4251 hp
->hnext
= hhashp
->hnext
;
4252 hhashp
->hnext
->hprev
= hp
;
4255 dhashp
= HDL_DHASH(hp
->dip
);
4256 hp
->dnext
= dhashp
->dnext
;
4257 dhashp
->dnext
->dprev
= hp
;
4261 * chain on any pre-existing errdefs that apply to this
4264 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
) {
4265 if (ddi_name_to_major(hp
->name
) ==
4266 ddi_name_to_major(ep
->name
) &&
4267 hp
->instance
== ep
->errdef
.instance
&&
4268 (ep
->errdef
.access_type
& BOFI_INTR
)) {
4269 lp
= bofi_link_freelist
;
4271 bofi_link_freelist
= lp
->link
;
4273 lp
->link
= hp
->link
;
4278 mutex_exit(&bofi_mutex
);
4279 mutex_exit(&bofi_low_mutex
);
4281 case DDI_INTROP_REMISR
:
4283 * call nexus routine first
4285 retval
= save_bus_ops
.bus_intr_op(dip
, rdip
,
4286 intr_op
, hdlp
, result
);
4288 * find shadow handle
4290 mutex_enter(&bofi_low_mutex
);
4291 mutex_enter(&bofi_mutex
);
4292 hhashp
= HDL_HHASH(hdlp
->ih_inum
);
4293 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
) {
4294 if (hp
->dip
== rdip
&&
4295 hp
->type
== BOFI_INT_HDL
&&
4296 hp
->bofi_inum
== hdlp
->ih_inum
) {
4301 mutex_exit(&bofi_mutex
);
4302 mutex_exit(&bofi_low_mutex
);
4306 * found one - remove from dhash, hhash and inuse lists
4308 hp
->hnext
->hprev
= hp
->hprev
;
4309 hp
->hprev
->hnext
= hp
->hnext
;
4310 hp
->dnext
->dprev
= hp
->dprev
;
4311 hp
->dprev
->dnext
= hp
->dnext
;
4312 hp
->next
->prev
= hp
->prev
;
4313 hp
->prev
->next
= hp
->next
;
4315 * free any errdef link structures
4316 * tagged on to this shadow handle
4318 for (lp
= hp
->link
; lp
!= NULL
; ) {
4320 lp
->link
= bofi_link_freelist
;
4321 bofi_link_freelist
= lp
;
4325 mutex_exit(&bofi_mutex
);
4326 mutex_exit(&bofi_low_mutex
);
4327 kmem_free(hp
, sizeof (struct bofi_shadow
));
4330 return (save_bus_ops
.bus_intr_op(dip
, rdip
,
4331 intr_op
, hdlp
, result
));