Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / uts / common / io / bofi.c
blob986127b174f99df0e0beffc077f05d09ed4815d1
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
30 #include <sys/types.h>
31 #include <sys/sysmacros.h>
32 #include <sys/buf.h>
33 #include <sys/errno.h>
34 #include <sys/modctl.h>
35 #include <sys/conf.h>
36 #include <sys/stat.h>
37 #include <sys/kmem.h>
38 #include <sys/proc.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ddi_impldefs.h>
41 #include <sys/ddi.h>
42 #include <sys/fm/protocol.h>
43 #include <sys/fm/util.h>
44 #include <sys/fm/io/ddi.h>
45 #include <sys/sysevent/eventdefs.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/debug.h>
49 #include <sys/bofi.h>
50 #include <sys/bofi_impl.h>
53 * Testing the resilience of a hardened device driver requires a suitably wide
54 * range of different types of "typical" hardware faults to be injected,
55 * preferably in a controlled and repeatable fashion. This is not in general
56 * possible via hardware, so the "fault injection test harness" is provided.
57 * This works by intercepting calls from the driver to various DDI routines,
58 * and then corrupting the result of those DDI routine calls as if the
59 * hardware had caused the corruption.
61 * Conceptually, the bofi driver consists of two parts:
63 * A driver interface that supports a number of ioctls which allow error
64 * definitions ("errdefs") to be defined and subsequently managed. The
65 * driver is a clone driver, so each open will create a separate
66 * invocation. Any errdefs created by using ioctls to that invocation
67 * will automatically be deleted when that invocation is closed.
69 * Intercept routines: When the bofi driver is attached, it edits the
70 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
71 * field in the "bofi.conf" file, thus allowing the
72 * bofi driver to intercept various ddi functions. These intercept
73 * routines primarily carry out fault injections based on the errdefs
74 * created for that device.
76 * Faults can be injected into:
78 * DMA (corrupting data for DMA to/from memory areas defined by
79 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
81 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
82 * etc),
84 * Interrupts (generating spurious interrupts, losing interrupts,
85 * delaying interrupts).
87 * By default, ddi routines called from all drivers will be intercepted
88 * and faults potentially injected. However, the "bofi-to-test" field in
89 * the "bofi.conf" file can be set to a space-separated list of drivers to
90 * test (or by preceding each driver name in the list with an "!", a list
91 * of drivers not to test).
93 * In addition to fault injection, the bofi driver does a number of static
94 * checks which are controlled by properties in the "bofi.conf" file.
96 * "bofi-ddi-check" - if set will validate that there are no PIO access
97 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
99 * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
100 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
101 * specifying addresses outside the range of the access_handle.
103 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
104 * are being made correctly.
107 extern void *bp_mapin_common(struct buf *, int);
109 static int bofi_ddi_check;
110 static int bofi_sync_check;
111 static int bofi_range_check;
113 static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;
115 #define LLSZMASK (sizeof (uint64_t)-1)
117 #define HDL_HASH_TBL_SIZE 64
118 static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
119 static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
120 #define HDL_DHASH(x) \
121 (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
122 #define HDL_HHASH(x) \
123 (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
125 static struct bofi_shadow shadow_list;
126 static struct bofi_errent *errent_listp;
128 static char driver_list[NAMESIZE];
129 static int driver_list_size;
130 static int driver_list_neg;
131 static char nexus_name[NAMESIZE];
133 static int initialized = 0;
135 #define NCLONES 2560
136 static int clone_tab[NCLONES];
138 static dev_info_t *our_dip;
140 static kmutex_t bofi_mutex;
141 static kmutex_t clone_tab_mutex;
142 static kmutex_t bofi_low_mutex;
143 static ddi_iblock_cookie_t bofi_low_cookie;
144 static uint_t bofi_signal(caddr_t arg);
145 static int bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
146 static int bofi_attach(dev_info_t *, ddi_attach_cmd_t);
147 static int bofi_detach(dev_info_t *, ddi_detach_cmd_t);
148 static int bofi_open(dev_t *, int, int, cred_t *);
149 static int bofi_close(dev_t, int, int, cred_t *);
150 static int bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
151 static int bofi_errdef_alloc(struct bofi_errdef *, char *,
152 struct bofi_errent *);
153 static int bofi_errdef_free(struct bofi_errent *);
154 static void bofi_start(struct bofi_errctl *, char *);
155 static void bofi_stop(struct bofi_errctl *, char *);
156 static void bofi_broadcast(struct bofi_errctl *, char *);
157 static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
158 static void bofi_clear_errors(struct bofi_errctl *, char *);
159 static void bofi_clear_errdefs(struct bofi_errctl *, char *);
160 static int bofi_errdef_check(struct bofi_errstate *,
161 struct acc_log_elem **);
162 static int bofi_errdef_check_w(struct bofi_errstate *,
163 struct acc_log_elem **);
164 static int bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
165 off_t, off_t, caddr_t *);
166 static int bofi_dma_allochdl(dev_info_t *, dev_info_t *,
167 ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
168 ddi_dma_handle_t *);
169 static int bofi_dma_freehdl(dev_info_t *, dev_info_t *,
170 ddi_dma_handle_t);
171 static int bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
172 ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
173 uint_t *);
174 static int bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
175 ddi_dma_handle_t);
176 static int bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
177 off_t, size_t, uint_t);
178 static int bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
179 enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
180 static int bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
181 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
182 static int bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
183 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
184 void *result);
185 static int bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);
187 evchan_t *bofi_error_chan;
189 #define FM_SIMULATED_DMA "simulated.dma"
190 #define FM_SIMULATED_PIO "simulated.pio"
192 static int driver_under_test(dev_info_t *);
193 static int bofi_check_acc_hdl(ddi_acc_impl_t *);
194 static int bofi_check_dma_hdl(ddi_dma_impl_t *);
195 static int bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
196 ddi_eventcookie_t eventhdl, void *impl_data);
/*
 * Replacement bus_ops vector that modify_bus_ops() installs over the
 * nexus driver under test.  Intercepted operations point back into bofi;
 * NULL/0 entries are positional placeholders for operations bofi does not
 * intercept.  The nexus's original entries are preserved in save_bus_ops
 * so reset_bus_ops() can restore them at detach time.
 */
static struct bus_ops bofi_bus_ops = {
	BUSO_REV,
	bofi_map,			/* bus_map - PIO access handles */
	NULL,
	NULL,
	NULL,
	i_ddi_map_fault,
	NULL,
	bofi_dma_allochdl,		/* DMA handle interception */
	bofi_dma_freehdl,
	bofi_dma_bindhdl,
	bofi_dma_unbindhdl,
	bofi_dma_flush,
	bofi_dma_win,
	bofi_dma_ctl,
	NULL,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	bofi_post_event,		/* bus_post_event */
	NULL,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	bofi_intr_ops			/* interrupt interception */
};
/*
 * Character-device entry points for the bofi control device itself
 * (a clone device: each open gets a private minor; see bofi_open()).
 * All interaction with the driver is via ioctl.
 */
static struct cb_ops bofi_cb_ops = {
	bofi_open,		/* open */
	bofi_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	bofi_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* for STREAMS drivers */
	D_MP,			/* driver compatibility flag */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};
/*
 * Device operations for the bofi pseudo-driver (single instance;
 * no quiesce needed since it is a pure pseudo device).
 */
static struct dev_ops bofi_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	bofi_getinfo,
	nulldev,
	nulldev,		/* probe */
	bofi_attach,
	bofi_detach,
	nulldev,		/* reset */
	&bofi_cb_ops,
	NULL,
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};
266 /* module configuration stuff */
267 static void *statep;
/* loadable-module linkage: this is a device driver */
static struct modldrv modldrv = {
	&mod_driverops,
	"bofi driver",
	&bofi_ops
};
/* module linkage: single modldrv, NULL-terminated list */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
281 static struct bus_ops save_bus_ops;
284 * support routine - map user page into kernel virtual
286 static caddr_t
287 dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
289 struct buf buf;
290 struct proc proc;
293 * mock up a buf structure so we can call bp_mapin_common()
295 buf.b_flags = B_PHYS;
296 buf.b_un.b_addr = (caddr_t)addr;
297 buf.b_bcount = (size_t)len;
298 proc.p_as = as;
299 buf.b_proc = &proc;
300 return (bp_mapin_common(&buf, flag));
305 * support routine - map page chain into kernel virtual
307 static caddr_t
308 dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
310 struct buf buf;
313 * mock up a buf structure so we can call bp_mapin_common()
315 buf.b_flags = B_PAGEIO;
316 buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
317 buf.b_bcount = (size_t)len;
318 buf.b_pages = pp;
319 return (bp_mapin_common(&buf, flag));
324 * support routine - map page array into kernel virtual
326 static caddr_t
327 dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
328 int flag)
330 struct buf buf;
331 struct proc proc;
334 * mock up a buf structure so we can call bp_mapin_common()
336 buf.b_flags = B_PHYS|B_SHADOW;
337 buf.b_un.b_addr = addr;
338 buf.b_bcount = len;
339 buf.b_shadow = pplist;
340 proc.p_as = as;
341 buf.b_proc = &proc;
342 return (bp_mapin_common(&buf, flag));
347 * support routine - map dmareq into kernel virtual if not already
348 * fills in *lenp with length
349 * *mapaddr will be new kernel virtual address - or null if no mapping needed
351 static caddr_t
352 ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
353 offset_t *lenp)
355 int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;
357 *lenp = dmareqp->dmar_object.dmao_size;
358 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
359 *mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
360 dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
361 dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
362 return (*mapaddrp);
363 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
364 *mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
365 dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
366 dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
367 dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
368 return (*mapaddrp);
369 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
370 *mapaddrp = NULL;
371 return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
372 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
373 *mapaddrp = NULL;
374 return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
375 } else {
376 *mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
377 dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
378 dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
379 return (*mapaddrp);
385 * support routine - free off kernel virtual mapping as allocated by
386 * ddi_dmareq_mapin()
388 static void
389 ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
390 page_t **pplist)
392 struct buf buf;
394 if (addr == NULL)
395 return;
397 * mock up a buf structure
399 buf.b_flags = B_REMAPPED | map_flags;
400 buf.b_un.b_addr = addr;
401 buf.b_bcount = (size_t)len;
402 buf.b_pages = pp;
403 buf.b_shadow = pplist;
404 bp_mapout(&buf);
407 static time_t
408 bofi_gettime()
410 timestruc_t ts;
412 gethrestime(&ts);
413 return (ts.tv_sec);
/*
 * reset the bus_ops structure of the specified nexus to point to
 * the original values in the save_bus_ops structure.
 *
 * Note that both this routine and modify_bus_ops() rely on the current
 * behavior of the framework in that nexus drivers are not unloadable
 *
 * Returns 1 on success, 0 if the module cannot be found or must not
 * be touched (still-active devices, no linkage, no bus_ops).
 */
static int
reset_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	mutex_enter(&mod_lock);
	/*
	 * find specified module in the circular modctl list
	 */
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			if (ops->devo_refcnt > 0) {
				/*
				 * As long as devices are active with modified
				 * bus ops bofi must not go away. There may be
				 * drivers with modified access or dma handles.
				 */
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
			    mp->drv_linkinfo);
			/*
			 * restore the saved entries in-place in the
			 * nexus's live bus_ops vector
			 */
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}
/*
 * modify the bus_ops structure of the specified nexus to point to bofi
 * routines, saving the original values in the save_bus_ops structure
 *
 * Returns 1 on success, 0 if the nexus cannot be found or is not in a
 * state where its bus_ops may be overlaid (no active devices, no
 * linkage, no bus_ops).
 */
static int
modify_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	/* -1 here is "no such driver" (DDI_MAJOR_T_NONE) */
	if (ddi_name_to_major(name) == -1)
		return (0);

	mutex_enter(&mod_lock);
	/*
	 * find specified module in the circular modctl list
	 */
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			if (ops->devo_refcnt == 0) {
				/*
				 * If there is no device active for this
				 * module then there is nothing to do for bofi.
				 */
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
			    mp->drv_linkinfo);
			/* save originals so reset_bus_ops() can restore */
			save_bus_ops = *bp;
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}
555 _init(void)
557 int e;
559 e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
560 if (e != 0)
561 return (e);
562 if ((e = mod_install(&modlinkage)) != 0)
563 ddi_soft_state_fini(&statep);
564 return (e);
569 _fini(void)
571 int e;
573 if ((e = mod_remove(&modlinkage)) != 0)
574 return (e);
575 ddi_soft_state_fini(&statep);
576 return (e);
/* module info entry point: delegate to the framework */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
587 static int
588 bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
590 char *name;
591 char buf[80];
592 int i;
593 int s, ss;
594 int size = NAMESIZE;
595 int new_string;
596 char *ptr;
598 if (cmd != DDI_ATTACH)
599 return (DDI_FAILURE);
601 * only one instance - but we clone using the open routine
603 if (ddi_get_instance(dip) > 0)
604 return (DDI_FAILURE);
606 if (!initialized) {
607 if ((name = ddi_get_name(dip)) == NULL)
608 return (DDI_FAILURE);
609 (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
610 if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
611 DDI_PSEUDO, 0) == DDI_FAILURE)
612 return (DDI_FAILURE);
614 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
615 &bofi_low_cookie) != DDI_SUCCESS) {
616 ddi_remove_minor_node(dip, buf);
617 return (DDI_FAILURE); /* fail attach */
620 * get nexus name (from conf file)
622 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
623 "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
624 ddi_remove_minor_node(dip, buf);
625 return (DDI_FAILURE);
628 * get whether to do dma map kmem private checking
630 if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
631 dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
632 bofi_range_check = 0;
633 else if (strcmp(ptr, "panic") == 0)
634 bofi_range_check = 2;
635 else if (strcmp(ptr, "warn") == 0)
636 bofi_range_check = 1;
637 else
638 bofi_range_check = 0;
639 ddi_prop_free(ptr);
642 * get whether to prevent direct access to register
644 if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
645 dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
646 bofi_ddi_check = 0;
647 else if (strcmp(ptr, "on") == 0)
648 bofi_ddi_check = 1;
649 else
650 bofi_ddi_check = 0;
651 ddi_prop_free(ptr);
654 * get whether to do copy on ddi_dma_sync
656 if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
657 dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
658 bofi_sync_check = 0;
659 else if (strcmp(ptr, "on") == 0)
660 bofi_sync_check = 1;
661 else
662 bofi_sync_check = 0;
663 ddi_prop_free(ptr);
666 * get driver-under-test names (from conf file)
668 size = NAMESIZE;
669 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
670 "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
671 driver_list[0] = 0;
673 * and convert into a sequence of strings
675 driver_list_neg = 1;
676 new_string = 1;
677 driver_list_size = strlen(driver_list);
678 for (i = 0; i < driver_list_size; i++) {
679 if (driver_list[i] == ' ') {
680 driver_list[i] = '\0';
681 new_string = 1;
682 } else if (new_string) {
683 if (driver_list[i] != '!')
684 driver_list_neg = 0;
685 new_string = 0;
689 * initialize mutex, lists
691 mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
692 NULL);
694 * fake up iblock cookie - need to protect outselves
695 * against drivers that use hilevel interrupts
697 ss = spl8();
698 s = spl8();
699 splx(ss);
700 mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
701 mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
702 (void *)bofi_low_cookie);
703 shadow_list.next = &shadow_list;
704 shadow_list.prev = &shadow_list;
705 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
706 hhash_table[i].hnext = &hhash_table[i];
707 hhash_table[i].hprev = &hhash_table[i];
708 dhash_table[i].dnext = &dhash_table[i];
709 dhash_table[i].dprev = &dhash_table[i];
711 for (i = 1; i < BOFI_NLINKS; i++)
712 bofi_link_array[i].link = &bofi_link_array[i-1];
713 bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
715 * overlay bus_ops structure
717 if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
718 ddi_remove_minor_node(dip, buf);
719 mutex_destroy(&clone_tab_mutex);
720 mutex_destroy(&bofi_mutex);
721 mutex_destroy(&bofi_low_mutex);
722 return (DDI_FAILURE);
724 if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
725 (void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
726 EC_FM, bofi_fm_ereport_callback, NULL, 0);
729 * save dip for getinfo
731 our_dip = dip;
732 ddi_report_dev(dip);
733 initialized = 1;
735 return (DDI_SUCCESS);
/*
 * detach entry point.  Refuses to detach while any shadow handle or
 * errdef is still outstanding, or if the nexus bus_ops cannot be
 * restored; otherwise unwinds everything bofi_attach() set up.
 */
static int
bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	char *name;
	char buf[80];

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);
	if ((name = ddi_get_name(dip)) == NULL)
		return (DDI_FAILURE);
	(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * make sure test bofi is no longer in use
	 */
	if (shadow_list.next != &shadow_list || errent_listp != NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	/*
	 * restore bus_ops structure
	 */
	if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
		return (DDI_FAILURE);

	(void) sysevent_evc_unbind(bofi_error_chan);

	mutex_destroy(&clone_tab_mutex);
	mutex_destroy(&bofi_mutex);
	mutex_destroy(&bofi_low_mutex);
	ddi_remove_minor_node(dip, buf);
	our_dip = NULL;
	initialized = 0;
	return (DDI_SUCCESS);
}
783 /* ARGSUSED */
784 static int
785 bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
787 dev_t dev = (dev_t)arg;
788 int minor = (int)getminor(dev);
789 int retval;
791 switch (cmd) {
792 case DDI_INFO_DEVT2DEVINFO:
793 if (minor != 0 || our_dip == NULL) {
794 *result = NULL;
795 retval = DDI_FAILURE;
796 } else {
797 *result = (void *)our_dip;
798 retval = DDI_SUCCESS;
800 break;
801 case DDI_INFO_DEVT2INSTANCE:
802 *result = NULL;
803 retval = DDI_SUCCESS;
804 break;
805 default:
806 retval = DDI_FAILURE;
808 return (retval);
812 /* ARGSUSED */
813 static int
814 bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
816 int minor = (int)getminor(*devp);
817 struct bofi_errent *softc;
820 * only allow open on minor=0 - the clone device
822 if (minor != 0)
823 return (ENXIO);
825 * fail if not attached
827 if (!initialized)
828 return (ENXIO);
830 * find a free slot and grab it
832 mutex_enter(&clone_tab_mutex);
833 for (minor = 1; minor < NCLONES; minor++) {
834 if (clone_tab[minor] == 0) {
835 clone_tab[minor] = 1;
836 break;
839 mutex_exit(&clone_tab_mutex);
840 if (minor == NCLONES)
841 return (EAGAIN);
843 * soft state structure for this clone is used to maintain a list
844 * of allocated errdefs so they can be freed on close
846 if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
847 mutex_enter(&clone_tab_mutex);
848 clone_tab[minor] = 0;
849 mutex_exit(&clone_tab_mutex);
850 return (EAGAIN);
852 softc = ddi_get_soft_state(statep, minor);
853 softc->cnext = softc;
854 softc->cprev = softc;
856 *devp = makedevice(getmajor(*devp), minor);
857 return (0);
861 /* ARGSUSED */
862 static int
863 bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
865 int minor = (int)getminor(dev);
866 struct bofi_errent *softc;
867 struct bofi_errent *ep, *next_ep;
869 softc = ddi_get_soft_state(statep, minor);
870 if (softc == NULL)
871 return (ENXIO);
873 * find list of errdefs and free them off
875 for (ep = softc->cnext; ep != softc; ) {
876 next_ep = ep->cnext;
877 (void) bofi_errdef_free(ep);
878 ep = next_ep;
881 * free clone tab slot
883 mutex_enter(&clone_tab_mutex);
884 clone_tab[minor] = 0;
885 mutex_exit(&clone_tab_mutex);
887 ddi_soft_state_free(statep, minor);
888 return (0);
892 /* ARGSUSED */
893 static int
894 bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
895 int *rvalp)
897 struct bofi_errent *softc;
898 int minor = (int)getminor(dev);
899 struct bofi_errdef errdef;
900 struct bofi_errctl errctl;
901 struct bofi_errstate errstate;
902 void *ed_handle;
903 struct bofi_get_handles get_handles;
904 struct bofi_get_hdl_info hdl_info;
905 struct handle_info *hdlip;
906 struct handle_info *hib;
908 char *buffer;
909 char *bufptr;
910 char *endbuf;
911 int req_count, count, err;
912 char *namep;
913 struct bofi_shadow *hp;
914 int retval;
915 struct bofi_shadow *hhashp;
916 int i;
918 switch (cmd) {
919 case BOFI_ADD_DEF:
921 * add a new error definition
923 #ifdef _MULTI_DATAMODEL
924 switch (ddi_model_convert_from(mode & FMODELS)) {
925 case DDI_MODEL_ILP32:
928 * For use when a 32 bit app makes a call into a
929 * 64 bit ioctl
931 struct bofi_errdef32 errdef_32;
933 if (ddi_copyin((void *)arg, &errdef_32,
934 sizeof (struct bofi_errdef32), mode)) {
935 return (EFAULT);
937 errdef.namesize = errdef_32.namesize;
938 (void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
939 errdef.instance = errdef_32.instance;
940 errdef.rnumber = errdef_32.rnumber;
941 errdef.offset = errdef_32.offset;
942 errdef.len = errdef_32.len;
943 errdef.access_type = errdef_32.access_type;
944 errdef.access_count = errdef_32.access_count;
945 errdef.fail_count = errdef_32.fail_count;
946 errdef.acc_chk = errdef_32.acc_chk;
947 errdef.optype = errdef_32.optype;
948 errdef.operand = errdef_32.operand;
949 errdef.log.logsize = errdef_32.log.logsize;
950 errdef.log.entries = errdef_32.log.entries;
951 errdef.log.flags = errdef_32.log.flags;
952 errdef.log.wrapcnt = errdef_32.log.wrapcnt;
953 errdef.log.start_time = errdef_32.log.start_time;
954 errdef.log.stop_time = errdef_32.log.stop_time;
955 errdef.log.logbase =
956 (caddr_t)(uintptr_t)errdef_32.log.logbase;
957 errdef.errdef_handle = errdef_32.errdef_handle;
958 break;
960 case DDI_MODEL_NONE:
961 if (ddi_copyin((void *)arg, &errdef,
962 sizeof (struct bofi_errdef), mode))
963 return (EFAULT);
964 break;
966 #else /* ! _MULTI_DATAMODEL */
967 if (ddi_copyin((void *)arg, &errdef,
968 sizeof (struct bofi_errdef), mode) != 0)
969 return (EFAULT);
970 #endif /* _MULTI_DATAMODEL */
972 * do some validation
974 if (errdef.fail_count == 0)
975 errdef.optype = 0;
976 if (errdef.optype != 0) {
977 if (errdef.access_type & BOFI_INTR &&
978 errdef.optype != BOFI_DELAY_INTR &&
979 errdef.optype != BOFI_LOSE_INTR &&
980 errdef.optype != BOFI_EXTRA_INTR)
981 return (EINVAL);
982 if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
983 errdef.optype == BOFI_NO_TRANSFER)
984 return (EINVAL);
985 if ((errdef.access_type & (BOFI_PIO_RW)) &&
986 errdef.optype != BOFI_EQUAL &&
987 errdef.optype != BOFI_OR &&
988 errdef.optype != BOFI_XOR &&
989 errdef.optype != BOFI_AND &&
990 errdef.optype != BOFI_NO_TRANSFER)
991 return (EINVAL);
994 * find softstate for this clone, so we can tag
995 * new errdef on to it
997 softc = ddi_get_soft_state(statep, minor);
998 if (softc == NULL)
999 return (ENXIO);
1001 * read in name
1003 if (errdef.namesize > NAMESIZE)
1004 return (EINVAL);
1005 namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
1006 (void) strncpy(namep, errdef.name, errdef.namesize);
1008 if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
1009 (void) bofi_errdef_free((struct bofi_errent *)
1010 (uintptr_t)errdef.errdef_handle);
1011 kmem_free(namep, errdef.namesize+1);
1012 return (EINVAL);
1015 * copy out errdef again, including filled in errdef_handle
1017 #ifdef _MULTI_DATAMODEL
1018 switch (ddi_model_convert_from(mode & FMODELS)) {
1019 case DDI_MODEL_ILP32:
1022 * For use when a 32 bit app makes a call into a
1023 * 64 bit ioctl
1025 struct bofi_errdef32 errdef_32;
1027 errdef_32.namesize = errdef.namesize;
1028 (void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
1029 errdef_32.instance = errdef.instance;
1030 errdef_32.rnumber = errdef.rnumber;
1031 errdef_32.offset = errdef.offset;
1032 errdef_32.len = errdef.len;
1033 errdef_32.access_type = errdef.access_type;
1034 errdef_32.access_count = errdef.access_count;
1035 errdef_32.fail_count = errdef.fail_count;
1036 errdef_32.acc_chk = errdef.acc_chk;
1037 errdef_32.optype = errdef.optype;
1038 errdef_32.operand = errdef.operand;
1039 errdef_32.log.logsize = errdef.log.logsize;
1040 errdef_32.log.entries = errdef.log.entries;
1041 errdef_32.log.flags = errdef.log.flags;
1042 errdef_32.log.wrapcnt = errdef.log.wrapcnt;
1043 errdef_32.log.start_time = errdef.log.start_time;
1044 errdef_32.log.stop_time = errdef.log.stop_time;
1045 errdef_32.log.logbase =
1046 (caddr32_t)(uintptr_t)errdef.log.logbase;
1047 errdef_32.errdef_handle = errdef.errdef_handle;
1048 if (ddi_copyout(&errdef_32, (void *)arg,
1049 sizeof (struct bofi_errdef32), mode) != 0) {
1050 (void) bofi_errdef_free((struct bofi_errent *)
1051 errdef.errdef_handle);
1052 kmem_free(namep, errdef.namesize+1);
1053 return (EFAULT);
1055 break;
1057 case DDI_MODEL_NONE:
1058 if (ddi_copyout(&errdef, (void *)arg,
1059 sizeof (struct bofi_errdef), mode) != 0) {
1060 (void) bofi_errdef_free((struct bofi_errent *)
1061 errdef.errdef_handle);
1062 kmem_free(namep, errdef.namesize+1);
1063 return (EFAULT);
1065 break;
1067 #else /* ! _MULTI_DATAMODEL */
1068 if (ddi_copyout(&errdef, (void *)arg,
1069 sizeof (struct bofi_errdef), mode) != 0) {
1070 (void) bofi_errdef_free((struct bofi_errent *)
1071 (uintptr_t)errdef.errdef_handle);
1072 kmem_free(namep, errdef.namesize+1);
1073 return (EFAULT);
1075 #endif /* _MULTI_DATAMODEL */
1076 return (0);
1077 case BOFI_DEL_DEF:
1079 * delete existing errdef
1081 if (ddi_copyin((void *)arg, &ed_handle,
1082 sizeof (void *), mode) != 0)
1083 return (EFAULT);
1084 return (bofi_errdef_free((struct bofi_errent *)ed_handle));
1085 case BOFI_START:
1087 * start all errdefs corresponding to
1088 * this name and instance
1090 if (ddi_copyin((void *)arg, &errctl,
1091 sizeof (struct bofi_errctl), mode) != 0)
1092 return (EFAULT);
1094 * copy in name
1096 if (errctl.namesize > NAMESIZE)
1097 return (EINVAL);
1098 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1099 (void) strncpy(namep, errctl.name, errctl.namesize);
1100 bofi_start(&errctl, namep);
1101 kmem_free(namep, errctl.namesize+1);
1102 return (0);
1103 case BOFI_STOP:
1105 * stop all errdefs corresponding to
1106 * this name and instance
1108 if (ddi_copyin((void *)arg, &errctl,
1109 sizeof (struct bofi_errctl), mode) != 0)
1110 return (EFAULT);
1112 * copy in name
1114 if (errctl.namesize > NAMESIZE)
1115 return (EINVAL);
1116 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1117 (void) strncpy(namep, errctl.name, errctl.namesize);
1118 bofi_stop(&errctl, namep);
1119 kmem_free(namep, errctl.namesize+1);
1120 return (0);
1121 case BOFI_BROADCAST:
1123 * wakeup all errdefs corresponding to
1124 * this name and instance
1126 if (ddi_copyin((void *)arg, &errctl,
1127 sizeof (struct bofi_errctl), mode) != 0)
1128 return (EFAULT);
1130 * copy in name
1132 if (errctl.namesize > NAMESIZE)
1133 return (EINVAL);
1134 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1135 (void) strncpy(namep, errctl.name, errctl.namesize);
1136 bofi_broadcast(&errctl, namep);
1137 kmem_free(namep, errctl.namesize+1);
1138 return (0);
1139 case BOFI_CLEAR_ACC_CHK:
1141 * clear "acc_chk" for all errdefs corresponding to
1142 * this name and instance
1144 if (ddi_copyin((void *)arg, &errctl,
1145 sizeof (struct bofi_errctl), mode) != 0)
1146 return (EFAULT);
1148 * copy in name
1150 if (errctl.namesize > NAMESIZE)
1151 return (EINVAL);
1152 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1153 (void) strncpy(namep, errctl.name, errctl.namesize);
1154 bofi_clear_acc_chk(&errctl, namep);
1155 kmem_free(namep, errctl.namesize+1);
1156 return (0);
1157 case BOFI_CLEAR_ERRORS:
1159 * set "fail_count" to 0 for all errdefs corresponding to
1160 * this name and instance whose "access_count"
1161 * has expired.
1163 if (ddi_copyin((void *)arg, &errctl,
1164 sizeof (struct bofi_errctl), mode) != 0)
1165 return (EFAULT);
1167 * copy in name
1169 if (errctl.namesize > NAMESIZE)
1170 return (EINVAL);
1171 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1172 (void) strncpy(namep, errctl.name, errctl.namesize);
1173 bofi_clear_errors(&errctl, namep);
1174 kmem_free(namep, errctl.namesize+1);
1175 return (0);
1176 case BOFI_CLEAR_ERRDEFS:
1178 * set "access_count" and "fail_count" to 0 for all errdefs
1179 * corresponding to this name and instance
1181 if (ddi_copyin((void *)arg, &errctl,
1182 sizeof (struct bofi_errctl), mode) != 0)
1183 return (EFAULT);
1185 * copy in name
1187 if (errctl.namesize > NAMESIZE)
1188 return (EINVAL);
1189 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1190 (void) strncpy(namep, errctl.name, errctl.namesize);
1191 bofi_clear_errdefs(&errctl, namep);
1192 kmem_free(namep, errctl.namesize+1);
1193 return (0);
1194 case BOFI_CHK_STATE:
1196 struct acc_log_elem *klg;
1197 size_t uls;
1199 * get state for this errdef - read in dummy errstate
1200 * with just the errdef_handle filled in
1202 #ifdef _MULTI_DATAMODEL
1203 switch (ddi_model_convert_from(mode & FMODELS)) {
1204 case DDI_MODEL_ILP32:
1207 * For use when a 32 bit app makes a call into a
1208 * 64 bit ioctl
1210 struct bofi_errstate32 errstate_32;
1212 if (ddi_copyin((void *)arg, &errstate_32,
1213 sizeof (struct bofi_errstate32), mode) != 0) {
1214 return (EFAULT);
1216 errstate.fail_time = errstate_32.fail_time;
1217 errstate.msg_time = errstate_32.msg_time;
1218 errstate.access_count = errstate_32.access_count;
1219 errstate.fail_count = errstate_32.fail_count;
1220 errstate.acc_chk = errstate_32.acc_chk;
1221 errstate.errmsg_count = errstate_32.errmsg_count;
1222 (void) strncpy(errstate.buffer, errstate_32.buffer,
1223 ERRMSGSIZE);
1224 errstate.severity = errstate_32.severity;
1225 errstate.log.logsize = errstate_32.log.logsize;
1226 errstate.log.entries = errstate_32.log.entries;
1227 errstate.log.flags = errstate_32.log.flags;
1228 errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1229 errstate.log.start_time = errstate_32.log.start_time;
1230 errstate.log.stop_time = errstate_32.log.stop_time;
1231 errstate.log.logbase =
1232 (caddr_t)(uintptr_t)errstate_32.log.logbase;
1233 errstate.errdef_handle = errstate_32.errdef_handle;
1234 break;
1236 case DDI_MODEL_NONE:
1237 if (ddi_copyin((void *)arg, &errstate,
1238 sizeof (struct bofi_errstate), mode) != 0)
1239 return (EFAULT);
1240 break;
1242 #else /* ! _MULTI_DATAMODEL */
1243 if (ddi_copyin((void *)arg, &errstate,
1244 sizeof (struct bofi_errstate), mode) != 0)
1245 return (EFAULT);
1246 #endif /* _MULTI_DATAMODEL */
1247 if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
1248 return (EINVAL);
1250 * copy out real errstate structure
1252 uls = errstate.log.logsize;
1253 if (errstate.log.entries > uls && uls)
1254 /* insufficient user memory */
1255 errstate.log.entries = uls;
1256 /* always pass back a time */
1257 if (errstate.log.stop_time == 0ul)
1258 (void) drv_getparm(TIME, &(errstate.log.stop_time));
1260 #ifdef _MULTI_DATAMODEL
1261 switch (ddi_model_convert_from(mode & FMODELS)) {
1262 case DDI_MODEL_ILP32:
1265 * For use when a 32 bit app makes a call into a
1266 * 64 bit ioctl
1268 struct bofi_errstate32 errstate_32;
1270 errstate_32.fail_time = errstate.fail_time;
1271 errstate_32.msg_time = errstate.msg_time;
1272 errstate_32.access_count = errstate.access_count;
1273 errstate_32.fail_count = errstate.fail_count;
1274 errstate_32.acc_chk = errstate.acc_chk;
1275 errstate_32.errmsg_count = errstate.errmsg_count;
1276 (void) strncpy(errstate_32.buffer, errstate.buffer,
1277 ERRMSGSIZE);
1278 errstate_32.severity = errstate.severity;
1279 errstate_32.log.logsize = errstate.log.logsize;
1280 errstate_32.log.entries = errstate.log.entries;
1281 errstate_32.log.flags = errstate.log.flags;
1282 errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1283 errstate_32.log.start_time = errstate.log.start_time;
1284 errstate_32.log.stop_time = errstate.log.stop_time;
1285 errstate_32.log.logbase =
1286 (caddr32_t)(uintptr_t)errstate.log.logbase;
1287 errstate_32.errdef_handle = errstate.errdef_handle;
1288 if (ddi_copyout(&errstate_32, (void *)arg,
1289 sizeof (struct bofi_errstate32), mode) != 0)
1290 return (EFAULT);
1291 break;
1293 case DDI_MODEL_NONE:
1294 if (ddi_copyout(&errstate, (void *)arg,
1295 sizeof (struct bofi_errstate), mode) != 0)
1296 return (EFAULT);
1297 break;
1299 #else /* ! _MULTI_DATAMODEL */
1300 if (ddi_copyout(&errstate, (void *)arg,
1301 sizeof (struct bofi_errstate), mode) != 0)
1302 return (EFAULT);
1303 #endif /* _MULTI_DATAMODEL */
1304 if (uls && errstate.log.entries &&
1305 ddi_copyout(klg, errstate.log.logbase,
1306 errstate.log.entries * sizeof (struct acc_log_elem),
1307 mode) != 0) {
1308 return (EFAULT);
1310 return (retval);
1312 case BOFI_CHK_STATE_W:
1314 struct acc_log_elem *klg;
1315 size_t uls;
1317 * get state for this errdef - read in dummy errstate
1318 * with just the errdef_handle filled in. Then wait for
1319 * a ddi_report_fault message to come back
1321 #ifdef _MULTI_DATAMODEL
1322 switch (ddi_model_convert_from(mode & FMODELS)) {
1323 case DDI_MODEL_ILP32:
1326 * For use when a 32 bit app makes a call into a
1327 * 64 bit ioctl
1329 struct bofi_errstate32 errstate_32;
1331 if (ddi_copyin((void *)arg, &errstate_32,
1332 sizeof (struct bofi_errstate32), mode) != 0) {
1333 return (EFAULT);
1335 errstate.fail_time = errstate_32.fail_time;
1336 errstate.msg_time = errstate_32.msg_time;
1337 errstate.access_count = errstate_32.access_count;
1338 errstate.fail_count = errstate_32.fail_count;
1339 errstate.acc_chk = errstate_32.acc_chk;
1340 errstate.errmsg_count = errstate_32.errmsg_count;
1341 (void) strncpy(errstate.buffer, errstate_32.buffer,
1342 ERRMSGSIZE);
1343 errstate.severity = errstate_32.severity;
1344 errstate.log.logsize = errstate_32.log.logsize;
1345 errstate.log.entries = errstate_32.log.entries;
1346 errstate.log.flags = errstate_32.log.flags;
1347 errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1348 errstate.log.start_time = errstate_32.log.start_time;
1349 errstate.log.stop_time = errstate_32.log.stop_time;
1350 errstate.log.logbase =
1351 (caddr_t)(uintptr_t)errstate_32.log.logbase;
1352 errstate.errdef_handle = errstate_32.errdef_handle;
1353 break;
1355 case DDI_MODEL_NONE:
1356 if (ddi_copyin((void *)arg, &errstate,
1357 sizeof (struct bofi_errstate), mode) != 0)
1358 return (EFAULT);
1359 break;
1361 #else /* ! _MULTI_DATAMODEL */
1362 if (ddi_copyin((void *)arg, &errstate,
1363 sizeof (struct bofi_errstate), mode) != 0)
1364 return (EFAULT);
1365 #endif /* _MULTI_DATAMODEL */
1366 if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
1367 return (EINVAL);
1369 * copy out real errstate structure
1371 uls = errstate.log.logsize;
1372 uls = errstate.log.logsize;
1373 if (errstate.log.entries > uls && uls)
1374 /* insufficient user memory */
1375 errstate.log.entries = uls;
1376 /* always pass back a time */
1377 if (errstate.log.stop_time == 0ul)
1378 (void) drv_getparm(TIME, &(errstate.log.stop_time));
1380 #ifdef _MULTI_DATAMODEL
1381 switch (ddi_model_convert_from(mode & FMODELS)) {
1382 case DDI_MODEL_ILP32:
1385 * For use when a 32 bit app makes a call into a
1386 * 64 bit ioctl
1388 struct bofi_errstate32 errstate_32;
1390 errstate_32.fail_time = errstate.fail_time;
1391 errstate_32.msg_time = errstate.msg_time;
1392 errstate_32.access_count = errstate.access_count;
1393 errstate_32.fail_count = errstate.fail_count;
1394 errstate_32.acc_chk = errstate.acc_chk;
1395 errstate_32.errmsg_count = errstate.errmsg_count;
1396 (void) strncpy(errstate_32.buffer, errstate.buffer,
1397 ERRMSGSIZE);
1398 errstate_32.severity = errstate.severity;
1399 errstate_32.log.logsize = errstate.log.logsize;
1400 errstate_32.log.entries = errstate.log.entries;
1401 errstate_32.log.flags = errstate.log.flags;
1402 errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1403 errstate_32.log.start_time = errstate.log.start_time;
1404 errstate_32.log.stop_time = errstate.log.stop_time;
1405 errstate_32.log.logbase =
1406 (caddr32_t)(uintptr_t)errstate.log.logbase;
1407 errstate_32.errdef_handle = errstate.errdef_handle;
1408 if (ddi_copyout(&errstate_32, (void *)arg,
1409 sizeof (struct bofi_errstate32), mode) != 0)
1410 return (EFAULT);
1411 break;
1413 case DDI_MODEL_NONE:
1414 if (ddi_copyout(&errstate, (void *)arg,
1415 sizeof (struct bofi_errstate), mode) != 0)
1416 return (EFAULT);
1417 break;
1419 #else /* ! _MULTI_DATAMODEL */
1420 if (ddi_copyout(&errstate, (void *)arg,
1421 sizeof (struct bofi_errstate), mode) != 0)
1422 return (EFAULT);
1423 #endif /* _MULTI_DATAMODEL */
1425 if (uls && errstate.log.entries &&
1426 ddi_copyout(klg, errstate.log.logbase,
1427 errstate.log.entries * sizeof (struct acc_log_elem),
1428 mode) != 0) {
1429 return (EFAULT);
1431 return (retval);
1433 case BOFI_GET_HANDLES:
1435 * display existing handles
1437 #ifdef _MULTI_DATAMODEL
1438 switch (ddi_model_convert_from(mode & FMODELS)) {
1439 case DDI_MODEL_ILP32:
1442 * For use when a 32 bit app makes a call into a
1443 * 64 bit ioctl
1445 struct bofi_get_handles32 get_handles_32;
1447 if (ddi_copyin((void *)arg, &get_handles_32,
1448 sizeof (get_handles_32), mode) != 0) {
1449 return (EFAULT);
1451 get_handles.namesize = get_handles_32.namesize;
1452 (void) strncpy(get_handles.name, get_handles_32.name,
1453 NAMESIZE);
1454 get_handles.instance = get_handles_32.instance;
1455 get_handles.count = get_handles_32.count;
1456 get_handles.buffer =
1457 (caddr_t)(uintptr_t)get_handles_32.buffer;
1458 break;
1460 case DDI_MODEL_NONE:
1461 if (ddi_copyin((void *)arg, &get_handles,
1462 sizeof (get_handles), mode) != 0)
1463 return (EFAULT);
1464 break;
1466 #else /* ! _MULTI_DATAMODEL */
1467 if (ddi_copyin((void *)arg, &get_handles,
1468 sizeof (get_handles), mode) != 0)
1469 return (EFAULT);
1470 #endif /* _MULTI_DATAMODEL */
1472 * read in name
1474 if (get_handles.namesize > NAMESIZE)
1475 return (EINVAL);
1476 namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
1477 (void) strncpy(namep, get_handles.name, get_handles.namesize);
1478 req_count = get_handles.count;
1479 bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
1480 endbuf = bufptr + req_count;
1482 * display existing handles
1484 mutex_enter(&bofi_low_mutex);
1485 mutex_enter(&bofi_mutex);
1486 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1487 hhashp = &hhash_table[i];
1488 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1489 if (!driver_under_test(hp->dip))
1490 continue;
1491 if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
1492 ddi_name_to_major(namep))
1493 continue;
1494 if (hp->instance != get_handles.instance)
1495 continue;
1497 * print information per handle - note that
1498 * DMA* means an unbound DMA handle
1500 (void) snprintf(bufptr, (size_t)(endbuf-bufptr),
1501 " %s %d %s ", hp->name, hp->instance,
1502 (hp->type == BOFI_INT_HDL) ? "INTR" :
1503 (hp->type == BOFI_ACC_HDL) ? "PIO" :
1504 (hp->type == BOFI_DMA_HDL) ? "DMA" :
1505 (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
1506 bufptr += strlen(bufptr);
1507 if (hp->type == BOFI_ACC_HDL) {
1508 if (hp->len == INT_MAX - hp->offset)
1509 (void) snprintf(bufptr,
1510 (size_t)(endbuf-bufptr),
1511 "reg set %d off 0x%llx\n",
1512 hp->rnumber, hp->offset);
1513 else
1514 (void) snprintf(bufptr,
1515 (size_t)(endbuf-bufptr),
1516 "reg set %d off 0x%llx"
1517 " len 0x%llx\n",
1518 hp->rnumber, hp->offset,
1519 hp->len);
1520 } else if (hp->type == BOFI_DMA_HDL)
1521 (void) snprintf(bufptr,
1522 (size_t)(endbuf-bufptr),
1523 "handle no %d len 0x%llx"
1524 " addr 0x%p\n", hp->rnumber,
1525 hp->len, (void *)hp->addr);
1526 else if (hp->type == BOFI_NULL &&
1527 hp->hparrayp == NULL)
1528 (void) snprintf(bufptr,
1529 (size_t)(endbuf-bufptr),
1530 "handle no %d\n", hp->rnumber);
1531 else
1532 (void) snprintf(bufptr,
1533 (size_t)(endbuf-bufptr), "\n");
1534 bufptr += strlen(bufptr);
1537 mutex_exit(&bofi_mutex);
1538 mutex_exit(&bofi_low_mutex);
1539 err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
1540 kmem_free(namep, get_handles.namesize+1);
1541 kmem_free(buffer, req_count);
1542 if (err != 0)
1543 return (EFAULT);
1544 else
1545 return (0);
1546 case BOFI_GET_HANDLE_INFO:
1548 * display existing handles
1550 #ifdef _MULTI_DATAMODEL
1551 switch (ddi_model_convert_from(mode & FMODELS)) {
1552 case DDI_MODEL_ILP32:
1555 * For use when a 32 bit app makes a call into a
1556 * 64 bit ioctl
1558 struct bofi_get_hdl_info32 hdl_info_32;
1560 if (ddi_copyin((void *)arg, &hdl_info_32,
1561 sizeof (hdl_info_32), mode)) {
1562 return (EFAULT);
1564 hdl_info.namesize = hdl_info_32.namesize;
1565 (void) strncpy(hdl_info.name, hdl_info_32.name,
1566 NAMESIZE);
1567 hdl_info.count = hdl_info_32.count;
1568 hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
1569 break;
1571 case DDI_MODEL_NONE:
1572 if (ddi_copyin((void *)arg, &hdl_info,
1573 sizeof (hdl_info), mode))
1574 return (EFAULT);
1575 break;
1577 #else /* ! _MULTI_DATAMODEL */
1578 if (ddi_copyin((void *)arg, &hdl_info,
1579 sizeof (hdl_info), mode))
1580 return (EFAULT);
1581 #endif /* _MULTI_DATAMODEL */
1582 if (hdl_info.namesize > NAMESIZE)
1583 return (EINVAL);
1584 namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
1585 (void) strncpy(namep, hdl_info.name, hdl_info.namesize);
1586 req_count = hdl_info.count;
1587 count = hdl_info.count = 0; /* the actual no of handles */
1588 if (req_count > 0) {
1589 hib = hdlip =
1590 kmem_zalloc(req_count * sizeof (struct handle_info),
1591 KM_SLEEP);
1592 } else {
1593 hib = hdlip = 0;
1594 req_count = hdl_info.count = 0;
1598 * display existing handles
1600 mutex_enter(&bofi_low_mutex);
1601 mutex_enter(&bofi_mutex);
1602 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1603 hhashp = &hhash_table[i];
1604 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1605 if (!driver_under_test(hp->dip) ||
1606 ddi_name_to_major(ddi_get_name(hp->dip)) !=
1607 ddi_name_to_major(namep) ||
1608 ++(hdl_info.count) > req_count ||
1609 count == req_count)
1610 continue;
1612 hdlip->instance = hp->instance;
1613 hdlip->rnumber = hp->rnumber;
1614 switch (hp->type) {
1615 case BOFI_ACC_HDL:
1616 hdlip->access_type = BOFI_PIO_RW;
1617 hdlip->offset = hp->offset;
1618 hdlip->len = hp->len;
1619 break;
1620 case BOFI_DMA_HDL:
1621 hdlip->access_type = 0;
1622 if (hp->flags & DDI_DMA_WRITE)
1623 hdlip->access_type |=
1624 BOFI_DMA_W;
1625 if (hp->flags & DDI_DMA_READ)
1626 hdlip->access_type |=
1627 BOFI_DMA_R;
1628 hdlip->len = hp->len;
1629 hdlip->addr_cookie =
1630 (uint64_t)(uintptr_t)hp->addr;
1631 break;
1632 case BOFI_INT_HDL:
1633 hdlip->access_type = BOFI_INTR;
1634 break;
1635 default:
1636 hdlip->access_type = 0;
1637 break;
1639 hdlip++;
1640 count++;
1643 mutex_exit(&bofi_mutex);
1644 mutex_exit(&bofi_low_mutex);
1645 err = 0;
1646 #ifdef _MULTI_DATAMODEL
1647 switch (ddi_model_convert_from(mode & FMODELS)) {
1648 case DDI_MODEL_ILP32:
1651 * For use when a 32 bit app makes a call into a
1652 * 64 bit ioctl
1654 struct bofi_get_hdl_info32 hdl_info_32;
1656 hdl_info_32.namesize = hdl_info.namesize;
1657 (void) strncpy(hdl_info_32.name, hdl_info.name,
1658 NAMESIZE);
1659 hdl_info_32.count = hdl_info.count;
1660 hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
1661 if (ddi_copyout(&hdl_info_32, (void *)arg,
1662 sizeof (hdl_info_32), mode) != 0) {
1663 kmem_free(namep, hdl_info.namesize+1);
1664 if (req_count > 0)
1665 kmem_free(hib,
1666 req_count * sizeof (*hib));
1667 return (EFAULT);
1669 break;
1671 case DDI_MODEL_NONE:
1672 if (ddi_copyout(&hdl_info, (void *)arg,
1673 sizeof (hdl_info), mode) != 0) {
1674 kmem_free(namep, hdl_info.namesize+1);
1675 if (req_count > 0)
1676 kmem_free(hib,
1677 req_count * sizeof (*hib));
1678 return (EFAULT);
1680 break;
1682 #else /* ! _MULTI_DATAMODEL */
1683 if (ddi_copyout(&hdl_info, (void *)arg,
1684 sizeof (hdl_info), mode) != 0) {
1685 kmem_free(namep, hdl_info.namesize+1);
1686 if (req_count > 0)
1687 kmem_free(hib, req_count * sizeof (*hib));
1688 return (EFAULT);
1690 #endif /* ! _MULTI_DATAMODEL */
1691 if (count > 0) {
1692 if (ddi_copyout(hib, hdl_info.hdli,
1693 count * sizeof (*hib), mode) != 0) {
1694 kmem_free(namep, hdl_info.namesize+1);
1695 if (req_count > 0)
1696 kmem_free(hib,
1697 req_count * sizeof (*hib));
1698 return (EFAULT);
1701 kmem_free(namep, hdl_info.namesize+1);
1702 if (req_count > 0)
1703 kmem_free(hib, req_count * sizeof (*hib));
1704 return (err);
1705 default:
1706 return (ENOTTY);
1712 * add a new error definition
1714 static int
1715 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1716 struct bofi_errent *softc)
1718 struct bofi_errent *ep;
1719 struct bofi_shadow *hp;
1720 struct bofi_link *lp;
1723 * allocate errdef structure and put on in-use list
1725 ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1726 ep->errdef = *errdefp;
1727 ep->name = namep;
1728 ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1729 ep->errstate.severity = DDI_SERVICE_RESTORED;
1730 ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1731 cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1733 * allocate space for logging
1735 ep->errdef.log.entries = 0;
1736 ep->errdef.log.wrapcnt = 0;
1737 if (ep->errdef.access_type & BOFI_LOG)
1738 ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1739 ep->errdef.log.logsize, KM_SLEEP);
1740 else
1741 ep->logbase = NULL;
1743 * put on in-use list
1745 mutex_enter(&bofi_low_mutex);
1746 mutex_enter(&bofi_mutex);
1747 ep->next = errent_listp;
1748 errent_listp = ep;
1750 * and add it to the per-clone list
1752 ep->cnext = softc->cnext;
1753 softc->cnext->cprev = ep;
1754 ep->cprev = softc;
1755 softc->cnext = ep;
1758 * look for corresponding shadow handle structures and if we find any
1759 * tag this errdef structure on to their link lists.
1761 for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1762 if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1763 hp->instance == errdefp->instance &&
1764 (((errdefp->access_type & BOFI_DMA_RW) &&
1765 (ep->errdef.rnumber == -1 ||
1766 hp->rnumber == ep->errdef.rnumber) &&
1767 hp->type == BOFI_DMA_HDL &&
1768 (((uintptr_t)(hp->addr + ep->errdef.offset +
1769 ep->errdef.len) & ~LLSZMASK) >
1770 ((uintptr_t)((hp->addr + ep->errdef.offset) +
1771 LLSZMASK) & ~LLSZMASK))) ||
1772 ((errdefp->access_type & BOFI_INTR) &&
1773 hp->type == BOFI_INT_HDL) ||
1774 ((errdefp->access_type & BOFI_PIO_RW) &&
1775 hp->type == BOFI_ACC_HDL &&
1776 (errdefp->rnumber == -1 ||
1777 hp->rnumber == errdefp->rnumber) &&
1778 (errdefp->len == 0 ||
1779 hp->offset < errdefp->offset + errdefp->len) &&
1780 hp->offset + hp->len > errdefp->offset))) {
1781 lp = bofi_link_freelist;
1782 if (lp != NULL) {
1783 bofi_link_freelist = lp->link;
1784 lp->errentp = ep;
1785 lp->link = hp->link;
1786 hp->link = lp;
1790 errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1791 mutex_exit(&bofi_mutex);
1792 mutex_exit(&bofi_low_mutex);
1793 ep->softintr_id = NULL;
1794 return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1795 NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1800 * delete existing errdef
1802 static int
1803 bofi_errdef_free(struct bofi_errent *ep)
1805 struct bofi_errent *hep, *prev_hep;
1806 struct bofi_link *lp, *prev_lp, *next_lp;
1807 struct bofi_shadow *hp;
1809 mutex_enter(&bofi_low_mutex);
1810 mutex_enter(&bofi_mutex);
1812 * don't just assume its a valid ep - check that its on the
1813 * in-use list
1815 prev_hep = NULL;
1816 for (hep = errent_listp; hep != NULL; ) {
1817 if (hep == ep)
1818 break;
1819 prev_hep = hep;
1820 hep = hep->next;
1822 if (hep == NULL) {
1823 mutex_exit(&bofi_mutex);
1824 mutex_exit(&bofi_low_mutex);
1825 return (EINVAL);
1828 * found it - delete from in-use list
1831 if (prev_hep)
1832 prev_hep->next = hep->next;
1833 else
1834 errent_listp = hep->next;
1836 * and take it off the per-clone list
1838 hep->cnext->cprev = hep->cprev;
1839 hep->cprev->cnext = hep->cnext;
1841 * see if we are on any shadow handle link lists - and if we
1842 * are then take us off
1844 for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1845 prev_lp = NULL;
1846 for (lp = hp->link; lp != NULL; ) {
1847 if (lp->errentp == ep) {
1848 if (prev_lp)
1849 prev_lp->link = lp->link;
1850 else
1851 hp->link = lp->link;
1852 next_lp = lp->link;
1853 lp->link = bofi_link_freelist;
1854 bofi_link_freelist = lp;
1855 lp = next_lp;
1856 } else {
1857 prev_lp = lp;
1858 lp = lp->link;
1862 mutex_exit(&bofi_mutex);
1863 mutex_exit(&bofi_low_mutex);
1865 cv_destroy(&ep->cv);
1866 kmem_free(ep->name, ep->errdef.namesize+1);
1867 if ((ep->errdef.access_type & BOFI_LOG) &&
1868 ep->errdef.log.logsize && ep->logbase) /* double check */
1869 kmem_free(ep->logbase,
1870 sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
1872 if (ep->softintr_id)
1873 ddi_remove_softintr(ep->softintr_id);
1874 kmem_free(ep, sizeof (struct bofi_errent));
1875 return (0);
1880 * start all errdefs corresponding to this name and instance
1882 static void
1883 bofi_start(struct bofi_errctl *errctlp, char *namep)
1885 struct bofi_errent *ep;
1888 * look for any errdefs with matching name and instance
1890 mutex_enter(&bofi_low_mutex);
1891 for (ep = errent_listp; ep != NULL; ep = ep->next)
1892 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1893 errctlp->instance == ep->errdef.instance) {
1894 ep->state |= BOFI_DEV_ACTIVE;
1895 (void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1896 ep->errdef.log.stop_time = 0ul;
1898 mutex_exit(&bofi_low_mutex);
1903 * stop all errdefs corresponding to this name and instance
1905 static void
1906 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1908 struct bofi_errent *ep;
1911 * look for any errdefs with matching name and instance
1913 mutex_enter(&bofi_low_mutex);
1914 for (ep = errent_listp; ep != NULL; ep = ep->next)
1915 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1916 errctlp->instance == ep->errdef.instance) {
1917 ep->state &= ~BOFI_DEV_ACTIVE;
1918 if (ep->errdef.log.stop_time == 0ul)
1919 (void) drv_getparm(TIME,
1920 &(ep->errdef.log.stop_time));
1922 mutex_exit(&bofi_low_mutex);
1927 * wake up any thread waiting on this errdefs
1929 static uint_t
1930 bofi_signal(caddr_t arg)
1932 struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1933 struct bofi_errent *hep;
1934 struct bofi_errent *ep =
1935 (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1937 mutex_enter(&bofi_low_mutex);
1938 for (hep = errent_listp; hep != NULL; ) {
1939 if (hep == ep)
1940 break;
1941 hep = hep->next;
1943 if (hep == NULL) {
1944 mutex_exit(&bofi_low_mutex);
1945 return (DDI_INTR_UNCLAIMED);
1947 if ((ep->errdef.access_type & BOFI_LOG) &&
1948 (edp->log.flags & BOFI_LOG_FULL)) {
1949 edp->log.stop_time = bofi_gettime();
1950 ep->state |= BOFI_NEW_MESSAGE;
1951 if (ep->state & BOFI_MESSAGE_WAIT)
1952 cv_broadcast(&ep->cv);
1953 ep->state &= ~BOFI_MESSAGE_WAIT;
1955 if (ep->errstate.msg_time != 0) {
1956 ep->state |= BOFI_NEW_MESSAGE;
1957 if (ep->state & BOFI_MESSAGE_WAIT)
1958 cv_broadcast(&ep->cv);
1959 ep->state &= ~BOFI_MESSAGE_WAIT;
1961 mutex_exit(&bofi_low_mutex);
1962 return (DDI_INTR_CLAIMED);
1967 * wake up all errdefs corresponding to this name and instance
1969 static void
1970 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1972 struct bofi_errent *ep;
1975 * look for any errdefs with matching name and instance
1977 mutex_enter(&bofi_low_mutex);
1978 for (ep = errent_listp; ep != NULL; ep = ep->next)
1979 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1980 errctlp->instance == ep->errdef.instance) {
1982 * wake up sleepers
1984 ep->state |= BOFI_NEW_MESSAGE;
1985 if (ep->state & BOFI_MESSAGE_WAIT)
1986 cv_broadcast(&ep->cv);
1987 ep->state &= ~BOFI_MESSAGE_WAIT;
1989 mutex_exit(&bofi_low_mutex);
1994 * clear "acc_chk" for all errdefs corresponding to this name and instance
1995 * and wake them up.
1997 static void
1998 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
2000 struct bofi_errent *ep;
2003 * look for any errdefs with matching name and instance
2005 mutex_enter(&bofi_low_mutex);
2006 for (ep = errent_listp; ep != NULL; ep = ep->next)
2007 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2008 errctlp->instance == ep->errdef.instance) {
2009 mutex_enter(&bofi_mutex);
2010 if (ep->errdef.access_count == 0 &&
2011 ep->errdef.fail_count == 0)
2012 ep->errdef.acc_chk = 0;
2013 mutex_exit(&bofi_mutex);
2015 * wake up sleepers
2017 ep->state |= BOFI_NEW_MESSAGE;
2018 if (ep->state & BOFI_MESSAGE_WAIT)
2019 cv_broadcast(&ep->cv);
2020 ep->state &= ~BOFI_MESSAGE_WAIT;
2022 mutex_exit(&bofi_low_mutex);
2027 * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2028 * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2030 static void
2031 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2033 struct bofi_errent *ep;
2036 * look for any errdefs with matching name and instance
2038 mutex_enter(&bofi_low_mutex);
2039 for (ep = errent_listp; ep != NULL; ep = ep->next)
2040 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2041 errctlp->instance == ep->errdef.instance) {
2042 mutex_enter(&bofi_mutex);
2043 if (ep->errdef.access_count == 0) {
2044 ep->errdef.acc_chk = 0;
2045 ep->errdef.fail_count = 0;
2046 mutex_exit(&bofi_mutex);
2047 if (ep->errdef.log.stop_time == 0ul)
2048 (void) drv_getparm(TIME,
2049 &(ep->errdef.log.stop_time));
2050 } else
2051 mutex_exit(&bofi_mutex);
2053 * wake up sleepers
2055 ep->state |= BOFI_NEW_MESSAGE;
2056 if (ep->state & BOFI_MESSAGE_WAIT)
2057 cv_broadcast(&ep->cv);
2058 ep->state &= ~BOFI_MESSAGE_WAIT;
2060 mutex_exit(&bofi_low_mutex);
2065 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2066 * this name and instance, set "acc_chk" to 0, and wake them up.
2068 static void
2069 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2071 struct bofi_errent *ep;
2074 * look for any errdefs with matching name and instance
2076 mutex_enter(&bofi_low_mutex);
2077 for (ep = errent_listp; ep != NULL; ep = ep->next)
2078 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2079 errctlp->instance == ep->errdef.instance) {
2080 mutex_enter(&bofi_mutex);
2081 ep->errdef.acc_chk = 0;
2082 ep->errdef.access_count = 0;
2083 ep->errdef.fail_count = 0;
2084 mutex_exit(&bofi_mutex);
2085 if (ep->errdef.log.stop_time == 0ul)
2086 (void) drv_getparm(TIME,
2087 &(ep->errdef.log.stop_time));
2089 * wake up sleepers
2091 ep->state |= BOFI_NEW_MESSAGE;
2092 if (ep->state & BOFI_MESSAGE_WAIT)
2093 cv_broadcast(&ep->cv);
2094 ep->state &= ~BOFI_MESSAGE_WAIT;
2096 mutex_exit(&bofi_low_mutex);
2101 * get state for this errdef
2103 static int
2104 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2106 struct bofi_errent *hep;
2107 struct bofi_errent *ep;
2109 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2110 mutex_enter(&bofi_low_mutex);
2112 * don't just assume its a valid ep - check that its on the
2113 * in-use list
2115 for (hep = errent_listp; hep != NULL; hep = hep->next)
2116 if (hep == ep)
2117 break;
2118 if (hep == NULL) {
2119 mutex_exit(&bofi_low_mutex);
2120 return (EINVAL);
2122 mutex_enter(&bofi_mutex);
2123 ep->errstate.access_count = ep->errdef.access_count;
2124 ep->errstate.fail_count = ep->errdef.fail_count;
2125 ep->errstate.acc_chk = ep->errdef.acc_chk;
2126 ep->errstate.log = ep->errdef.log;
2127 *logpp = ep->logbase;
2128 *errstatep = ep->errstate;
2129 mutex_exit(&bofi_mutex);
2130 mutex_exit(&bofi_low_mutex);
2131 return (0);
2136 * Wait for a ddi_report_fault message to come back for this errdef
2137 * Then return state for this errdef.
2138 * fault report is intercepted by bofi_post_event, which triggers
2139 * bofi_signal via a softint, which will wake up this routine if
2140 * we are waiting
2142 static int
2143 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2144 struct acc_log_elem **logpp)
2146 struct bofi_errent *hep;
2147 struct bofi_errent *ep;
2148 int rval = 0;
2150 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2151 mutex_enter(&bofi_low_mutex);
2152 retry:
2154 * don't just assume its a valid ep - check that its on the
2155 * in-use list
2157 for (hep = errent_listp; hep != NULL; hep = hep->next)
2158 if (hep == ep)
2159 break;
2160 if (hep == NULL) {
2161 mutex_exit(&bofi_low_mutex);
2162 return (EINVAL);
2165 * wait for ddi_report_fault for the devinfo corresponding
2166 * to this errdef
2168 if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2169 ep->state |= BOFI_MESSAGE_WAIT;
2170 if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
2171 if (!(ep->state & BOFI_NEW_MESSAGE))
2172 rval = EINTR;
2174 goto retry;
2176 ep->state &= ~BOFI_NEW_MESSAGE;
2178 * we either didn't need to sleep, we've been woken up or we've been
2179 * signaled - either way return state now
2181 mutex_enter(&bofi_mutex);
2182 ep->errstate.access_count = ep->errdef.access_count;
2183 ep->errstate.fail_count = ep->errdef.fail_count;
2184 ep->errstate.acc_chk = ep->errdef.acc_chk;
2185 ep->errstate.log = ep->errdef.log;
2186 *logpp = ep->logbase;
2187 *errstatep = ep->errstate;
2188 mutex_exit(&bofi_mutex);
2189 mutex_exit(&bofi_low_mutex);
2190 return (rval);
2195 * support routine - check if requested driver is defined as under test in the
2196 * conf file.
2198 static int
2199 driver_under_test(dev_info_t *rdip)
2201 int i;
2202 char *rname;
2203 major_t rmaj;
2205 rname = ddi_get_name(rdip);
2206 rmaj = ddi_name_to_major(rname);
2209 * Enforce the user to specifically request the following drivers.
2211 for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2212 if (driver_list_neg == 0) {
2213 if (rmaj == ddi_name_to_major(&driver_list[i]))
2214 return (1);
2215 } else {
2216 if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2217 return (0);
2220 if (driver_list_neg == 0)
2221 return (0);
2222 else
2223 return (1);
2228 static void
2229 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2230 size_t repcount, uint64_t *valuep)
2232 struct bofi_errdef *edp = &(ep->errdef);
2233 struct acc_log *log = &edp->log;
2235 ASSERT(log != NULL);
2236 ASSERT(MUTEX_HELD(&bofi_mutex));
2238 if (log->flags & BOFI_LOG_REPIO)
2239 repcount = 1;
2240 else if (repcount == 0 && edp->access_count > 0 &&
2241 (log->flags & BOFI_LOG_FULL) == 0)
2242 edp->access_count += 1;
2244 if (repcount && log->entries < log->logsize) {
2245 struct acc_log_elem *elem = ep->logbase + log->entries;
2247 if (log->flags & BOFI_LOG_TIMESTAMP)
2248 elem->access_time = bofi_gettime();
2249 elem->access_type = at;
2250 elem->offset = offset;
2251 elem->value = valuep ? *valuep : 0ll;
2252 elem->size = len;
2253 elem->repcount = repcount;
2254 ++log->entries;
2255 if (log->entries == log->logsize) {
2256 log->flags |= BOFI_LOG_FULL;
2257 ddi_trigger_softintr(((struct bofi_errent *)
2258 (uintptr_t)edp->errdef_handle)->softintr_id);
2261 if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2262 log->wrapcnt++;
2263 edp->access_count = log->logsize;
2264 log->entries = 0; /* wrap back to the start */
2270 * got a condition match on dma read/write - check counts and corrupt
2271 * data if necessary
2273 * bofi_mutex always held when this is called.
/*
 * do_dma_corrupt() - apply an errdef's corruption to a DMA buffer on sync.
 * Called with bofi_mutex held (asserted).  Logs the access if requested,
 * decrements access/fail counters, optionally posts a simulated-DMA FMA
 * ereport (acc_chk & 2), then corrupts the 64-bit-aligned overlap of the
 * errdef range and the handle's mapping per optype (EQUAL/AND/OR/XOR).
 */
2275 static void
2276 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2277 uint_t synctype, off_t off, off_t length)
2279 uint64_t operand;
2280 int i;
2281 off_t len;
2282 caddr_t logaddr;
2283 uint64_t *addr;
2284 uint64_t *endaddr;
2285 ddi_dma_impl_t *hdlp;
2286 ndi_err_t *errp;
2288 ASSERT(MUTEX_HELD(&bofi_mutex));
2289 if ((ep->errdef.access_count ||
2290 ep->errdef.fail_count) &&
2291 (ep->errdef.access_type & BOFI_LOG)) {
2292 uint_t atype;
/* map sync direction onto the logged access type; 0 if neither */
2294 if (synctype == DDI_DMA_SYNC_FORDEV)
2295 atype = BOFI_DMA_W;
2296 else if (synctype == DDI_DMA_SYNC_FORCPU ||
2297 synctype == DDI_DMA_SYNC_FORKERNEL)
2298 atype = BOFI_DMA_R;
2299 else
2300 atype = 0;
/* only log when the synced range overlaps the errdef range */
2301 if ((off <= ep->errdef.offset &&
2302 off + length > ep->errdef.offset) ||
2303 (off > ep->errdef.offset &&
2304 off < ep->errdef.offset + ep->errdef.len)) {
2305 logaddr = (caddr_t)((uintptr_t)(hp->addr +
2306 off + LLSZMASK) & ~LLSZMASK);
2308 log_acc_event(ep, atype, logaddr - hp->addr,
2309 length, 1, 0);
2312 if (ep->errdef.access_count > 1) {
2313 ep->errdef.access_count--;
2314 } else if (ep->errdef.fail_count > 0) {
2315 ep->errdef.fail_count--;
2316 ep->errdef.access_count = 0;
2318 * OK do the corruption
2320 if (ep->errstate.fail_time == 0)
2321 ep->errstate.fail_time = bofi_gettime();
2323 * work out how much to corrupt
2325 * Make sure endaddr isn't greater than hp->addr + hp->len.
2326 * If endaddr becomes less than addr len becomes negative
2327 * and the following loop isn't entered.
2329 addr = (uint64_t *)((uintptr_t)((hp->addr +
2330 ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2331 endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2332 ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2333 len = endaddr - addr;
2334 operand = ep->errdef.operand;
2335 hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
2336 errp = &hdlp->dmai_error;
/* acc_chk & 2: also flag the handle non-fatal and post an FMA ereport */
2337 if (ep->errdef.acc_chk & 2) {
2338 uint64_t ena;
2339 char buf[FM_MAX_CLASS];
2341 errp->err_status = DDI_FM_NONFATAL;
2342 (void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
2343 ena = fm_ena_generate(0, FM_ENA_FMT1);
2344 ddi_fm_ereport_post(hp->dip, buf, ena,
2345 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2346 FM_EREPORT_VERS0, NULL);
/* corrupt len 64-bit words in place according to the errdef operation */
2348 switch (ep->errdef.optype) {
2349 case BOFI_EQUAL :
2350 for (i = 0; i < len; i++)
2351 *(addr + i) = operand;
2352 break;
2353 case BOFI_AND :
2354 for (i = 0; i < len; i++)
2355 *(addr + i) &= operand;
2356 break;
2357 case BOFI_OR :
2358 for (i = 0; i < len; i++)
2359 *(addr + i) |= operand;
2360 break;
2361 case BOFI_XOR :
2362 for (i = 0; i < len; i++)
2363 *(addr + i) ^= operand;
2364 break;
2365 default:
2366 /* do nothing */
2367 break;
2373 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2374 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2375 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2376 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2380 * check all errdefs linked to this shadow handle. If we've got a condition
2381 * match check counts and corrupt data if necessary
2383 * bofi_mutex always held when this is called.
2385 * because of possibility of BOFI_NO_TRANSFER, we couldn't get data
2386 * from io-space before calling this, so we pass in the func to do the
2387 * transfer as a parameter.
/*
 * do_pior_corrupt() - intercept a PIO read through a shadow handle.
 * Walks every errdef linked to hp; on a condition match it decrements
 * counters, logs the access, optionally posts a simulated-PIO ereport
 * (acc_chk & 1), and corrupts the value read.  The real device read is
 * performed lazily via func() (done_get latch) so that BOFI_NO_TRANSFER
 * can return the operand without touching io-space at all.
 * Caller must hold bofi_mutex (asserted).
 */
2389 static uint64_t
2390 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2391 uint64_t (*func)(), size_t repcount, size_t accsize)
2393 struct bofi_errent *ep;
2394 struct bofi_link *lp;
2395 uint64_t operand;
2396 uintptr_t minlen;
2397 intptr_t base;
2398 int done_get = 0;
2399 uint64_t get_val, gv;
2400 ddi_acc_impl_t *hdlp;
2401 ndi_err_t *errp;
2403 ASSERT(MUTEX_HELD(&bofi_mutex));
2405 * check through all errdefs associated with this shadow handle
2407 for (lp = hp->link; lp != NULL; lp = lp->link) {
2408 ep = lp->errentp;
/* errdef len 0 means "whole mapping" */
2409 if (ep->errdef.len == 0)
2410 minlen = hp->len;
2411 else
2412 minlen = min(hp->len, ep->errdef.len);
2413 base = addr - hp->addr - ep->errdef.offset + hp->offset;
2414 if ((ep->errdef.access_type & BOFI_PIO_R) &&
2415 (ep->state & BOFI_DEV_ACTIVE) &&
2416 base >= 0 && base < minlen) {
2418 * condition match for pio read
2420 if (ep->errdef.access_count > 1) {
2421 ep->errdef.access_count--;
/* perform the real read once and cache it */
2422 if (done_get == 0) {
2423 done_get = 1;
2424 gv = get_val = func(hp, addr);
2426 if (ep->errdef.access_type & BOFI_LOG) {
2427 log_acc_event(ep, BOFI_PIO_R,
2428 addr - hp->addr,
2429 accsize, repcount, &gv);
2431 } else if (ep->errdef.fail_count > 0) {
2432 ep->errdef.fail_count--;
2433 ep->errdef.access_count = 0;
2435 * OK do corruption
2437 if (ep->errstate.fail_time == 0)
2438 ep->errstate.fail_time = bofi_gettime();
2439 operand = ep->errdef.operand;
2440 if (done_get == 0) {
2441 if (ep->errdef.optype ==
2442 BOFI_NO_TRANSFER)
2444 * no transfer - bomb out
2446 return (operand);
2447 done_get = 1;
2448 gv = get_val = func(hp, addr);
2451 if (ep->errdef.access_type & BOFI_LOG) {
2452 log_acc_event(ep, BOFI_PIO_R,
2453 addr - hp->addr,
2454 accsize, repcount, &gv);
2456 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2457 errp = hdlp->ahi_err;
/* acc_chk & 1: flag handle non-fatal and post a simulated-PIO ereport */
2458 if (ep->errdef.acc_chk & 1) {
2459 uint64_t ena;
2460 char buf[FM_MAX_CLASS];
2462 errp->err_status = DDI_FM_NONFATAL;
2463 (void) snprintf(buf, FM_MAX_CLASS,
2464 FM_SIMULATED_PIO);
2465 ena = fm_ena_generate(0, FM_ENA_FMT1);
2466 ddi_fm_ereport_post(hp->dip, buf, ena,
2467 DDI_NOSLEEP, FM_VERSION,
2468 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2469 NULL);
2471 switch (ep->errdef.optype) {
2472 case BOFI_EQUAL :
2473 get_val = operand;
2474 break;
2475 case BOFI_AND :
2476 get_val &= operand;
2477 break;
2478 case BOFI_OR :
2479 get_val |= operand;
2480 break;
2481 case BOFI_XOR :
2482 get_val ^= operand;
2483 break;
2484 default:
2485 /* do nothing */
2486 break;
/* no errdef fired: pass the read straight through */
2491 if (done_get == 0)
2492 return (func(hp, addr));
2493 else
2494 return (get_val);
2499 * check all errdefs linked to this shadow handle. If we've got a condition
2500 * match check counts and corrupt data if necessary
2502 * bofi_mutex always held when this is called.
2504 * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
2505 * is to be written out to io-space, 1 otherwise
/*
 * do_piow_corrupt() - intercept a PIO write through a shadow handle.
 * Walks every errdef linked to hp; on a match it decrements counters,
 * logs the write, optionally posts a simulated-PIO ereport (acc_chk & 1),
 * and corrupts *valuep in place per optype.  Returns 0 for
 * BOFI_NO_TRANSFER (caller must suppress the device write), 1 otherwise.
 * Caller must hold bofi_mutex (asserted).
 */
2507 static int
2508 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2509 size_t size, size_t repcount)
2511 struct bofi_errent *ep;
2512 struct bofi_link *lp;
2513 uintptr_t minlen;
2514 intptr_t base;
2515 uint64_t v = *valuep;
2516 ddi_acc_impl_t *hdlp;
2517 ndi_err_t *errp;
2519 ASSERT(MUTEX_HELD(&bofi_mutex));
2521 * check through all errdefs associated with this shadow handle
2523 for (lp = hp->link; lp != NULL; lp = lp->link) {
2524 ep = lp->errentp;
/* errdef len 0 means "whole mapping" */
2525 if (ep->errdef.len == 0)
2526 minlen = hp->len;
2527 else
2528 minlen = min(hp->len, ep->errdef.len);
2529 base = (caddr_t)addr - hp->addr - ep->errdef.offset +hp->offset;
2530 if ((ep->errdef.access_type & BOFI_PIO_W) &&
2531 (ep->state & BOFI_DEV_ACTIVE) &&
2532 base >= 0 && base < minlen) {
2534 * condition match for pio write
2537 if (ep->errdef.access_count > 1) {
2538 ep->errdef.access_count--;
2539 if (ep->errdef.access_type & BOFI_LOG)
2540 log_acc_event(ep, BOFI_PIO_W,
2541 addr - hp->addr, size,
2542 repcount, &v);
2543 } else if (ep->errdef.fail_count > 0) {
2544 ep->errdef.fail_count--;
2545 ep->errdef.access_count = 0;
2546 if (ep->errdef.access_type & BOFI_LOG)
2547 log_acc_event(ep, BOFI_PIO_W,
2548 addr - hp->addr, size,
2549 repcount, &v);
2551 * OK do corruption
2553 if (ep->errstate.fail_time == 0)
2554 ep->errstate.fail_time = bofi_gettime();
2555 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2556 errp = hdlp->ahi_err;
/* acc_chk & 1: flag handle non-fatal and post a simulated-PIO ereport */
2557 if (ep->errdef.acc_chk & 1) {
2558 uint64_t ena;
2559 char buf[FM_MAX_CLASS];
2561 errp->err_status = DDI_FM_NONFATAL;
2562 (void) snprintf(buf, FM_MAX_CLASS,
2563 FM_SIMULATED_PIO);
2564 ena = fm_ena_generate(0, FM_ENA_FMT1);
2565 ddi_fm_ereport_post(hp->dip, buf, ena,
2566 DDI_NOSLEEP, FM_VERSION,
2567 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2568 NULL);
2570 switch (ep->errdef.optype) {
2571 case BOFI_EQUAL :
2572 *valuep = ep->errdef.operand;
2573 break;
2574 case BOFI_AND :
2575 *valuep &= ep->errdef.operand;
2576 break;
2577 case BOFI_OR :
2578 *valuep |= ep->errdef.operand;
2579 break;
2580 case BOFI_XOR :
2581 *valuep ^= ep->errdef.operand;
2582 break;
2583 case BOFI_NO_TRANSFER :
2585 * no transfer - bomb out
2587 return (0);
2588 default:
2589 /* do nothing */
2590 break;
/* 1 => caller should go ahead and write (possibly corrupted) value */
2595 return (1);
/* underlying 8-bit PIO read: invoke the saved (pre-intercept) get8 routine */
2599 static uint64_t
2600 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2602 return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
/*
 * BOFI_READ_CHECKS - common prologue for the bofi_rdN routines.
 * With bofi_ddi_check, translate the spurious address handed out by
 * bofi_map() back to the real mapping; with bofi_range_check, warn (or
 * panic if set to 2) and return 0 on an out-of-range ddi_get().
 */
2605 #define BOFI_READ_CHECKS(type) \
2606 if (bofi_ddi_check) \
2607 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2608 if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2609 (caddr_t)addr - hp->addr >= hp->len)) { \
2610 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2611 "ddi_get() out of range addr %p not in %p/%llx", \
2612 (void *)addr, (void *)hp->addr, hp->len); \
2613 return (0); \
2617 * our getb() routine - use tryenter
/*
 * bofi_rd8() - intercepted ddi_get8().  Uses mutex_tryenter so a read
 * from interrupt/panic context falls straight through to the real
 * routine rather than blocking on bofi_mutex.
 */
2619 static uint8_t
2620 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2622 struct bofi_shadow *hp;
2623 uint8_t retval;
2625 hp = handle->ahi_common.ah_bus_private;
2626 BOFI_READ_CHECKS(uint8_t)
2627 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2628 return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2629 retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2631 mutex_exit(&bofi_mutex);
2632 return (retval);
/* underlying 16-bit PIO read: invoke the saved (pre-intercept) get16 routine */
2636 static uint64_t
2637 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2639 return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2644 * our getw() routine - use tryenter
/*
 * bofi_rd16() - intercepted ddi_get16(); tryenter so interrupt-context
 * reads bypass the harness instead of blocking on bofi_mutex.
 */
2646 static uint16_t
2647 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2649 struct bofi_shadow *hp;
2650 uint16_t retval;
2652 hp = handle->ahi_common.ah_bus_private;
2653 BOFI_READ_CHECKS(uint16_t)
2654 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2655 return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2656 retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2658 mutex_exit(&bofi_mutex);
2659 return (retval);
/* underlying 32-bit PIO read: invoke the saved (pre-intercept) get32 routine */
2663 static uint64_t
2664 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2666 return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2671 * our getl() routine - use tryenter
/*
 * bofi_rd32() - intercepted ddi_get32(); tryenter so interrupt-context
 * reads bypass the harness instead of blocking on bofi_mutex.
 */
2673 static uint32_t
2674 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2676 struct bofi_shadow *hp;
2677 uint32_t retval;
2679 hp = handle->ahi_common.ah_bus_private;
2680 BOFI_READ_CHECKS(uint32_t)
2681 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2682 return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2683 retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2685 mutex_exit(&bofi_mutex);
2686 return (retval);
/* underlying 64-bit PIO read: invoke the saved (pre-intercept) get64 routine */
2690 static uint64_t
2691 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2693 return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2698 * our getll() routine - use tryenter
/*
 * bofi_rd64() - intercepted ddi_get64(); tryenter so interrupt-context
 * reads bypass the harness instead of blocking on bofi_mutex.
 */
2700 static uint64_t
2701 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2703 struct bofi_shadow *hp;
2704 uint64_t retval;
2706 hp = handle->ahi_common.ah_bus_private;
2707 BOFI_READ_CHECKS(uint64_t)
2708 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2709 return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2710 retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2712 mutex_exit(&bofi_mutex);
2713 return (retval);
/*
 * BOFI_WRITE_TESTS - common prologue for the bofi_wrN routines;
 * same address-translation and range checking as BOFI_READ_CHECKS
 * but returns void on an out-of-range ddi_put().
 */
2716 #define BOFI_WRITE_TESTS(type) \
2717 if (bofi_ddi_check) \
2718 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2719 if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2720 (caddr_t)addr - hp->addr >= hp->len)) { \
2721 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2722 "ddi_put() out of range addr %p not in %p/%llx\n", \
2723 (void *)addr, (void *)hp->addr, hp->len); \
2724 return; \
2728 * our putb() routine - use tryenter
/*
 * bofi_wr8() - intercepted ddi_put8().  do_piow_corrupt() may modify
 * llvalue in place; a 0 return (BOFI_NO_TRANSFER) suppresses the write.
 */
2730 static void
2731 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2733 struct bofi_shadow *hp;
2734 uint64_t llvalue = value;
2736 hp = handle->ahi_common.ah_bus_private;
2737 BOFI_WRITE_TESTS(uint8_t)
2738 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2739 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2740 return;
2742 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2743 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2744 mutex_exit(&bofi_mutex);
2749 * our putw() routine - use tryenter
/*
 * bofi_wr16() - intercepted ddi_put16(); see bofi_wr8() for the
 * tryenter/no-transfer pattern.
 */
2751 static void
2752 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2754 struct bofi_shadow *hp;
2755 uint64_t llvalue = value;
2757 hp = handle->ahi_common.ah_bus_private;
2758 BOFI_WRITE_TESTS(uint16_t)
2759 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2760 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2761 return;
2763 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2764 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2765 mutex_exit(&bofi_mutex);
2770 * our putl() routine - use tryenter
/*
 * bofi_wr32() - intercepted ddi_put32(); see bofi_wr8() for the
 * tryenter/no-transfer pattern.
 */
2772 static void
2773 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2775 struct bofi_shadow *hp;
2776 uint64_t llvalue = value;
2778 hp = handle->ahi_common.ah_bus_private;
2779 BOFI_WRITE_TESTS(uint32_t)
2780 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2781 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2782 return;
2784 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2785 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2786 mutex_exit(&bofi_mutex);
2791 * our putll() routine - use tryenter
/*
 * bofi_wr64() - intercepted ddi_put64(); see bofi_wr8() for the
 * tryenter/no-transfer pattern.
 */
2793 static void
2794 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2796 struct bofi_shadow *hp;
2797 uint64_t llvalue = value;
2799 hp = handle->ahi_common.ah_bus_private;
2800 BOFI_WRITE_TESTS(uint64_t)
2801 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2802 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2803 return;
2805 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2806 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2807 mutex_exit(&bofi_mutex);
/*
 * BOFI_REP_READ_TESTS - prologue for the bofi_rep_rdN routines.
 * Like BOFI_READ_CHECKS, but when only the tail of the repeat run is
 * out of range it clamps repcount to what fits instead of bailing out.
 */
2810 #define BOFI_REP_READ_TESTS(type) \
2811 if (bofi_ddi_check) \
2812 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2813 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2814 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2815 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2816 "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2817 (void *)dev_addr, (void *)hp->addr, hp->len); \
2818 if ((caddr_t)dev_addr < hp->addr || \
2819 (caddr_t)dev_addr - hp->addr >= hp->len) \
2820 return; \
2821 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2825 * our rep_getb() routine - use tryenter
/*
 * bofi_rep_rd8() - intercepted ddi_rep_get8().  Each element goes
 * through do_pior_corrupt(); repcount is passed only on the first
 * iteration (i == 0) so the repeat run is logged once.
 */
2827 static void
2828 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2829 size_t repcount, uint_t flags)
2831 struct bofi_shadow *hp;
2832 int i;
2833 uint8_t *addr;
2835 hp = handle->ahi_common.ah_bus_private;
2836 BOFI_REP_READ_TESTS(uint8_t)
2837 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2838 hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2839 repcount, flags);
2840 return;
2842 for (i = 0; i < repcount; i++) {
2843 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2844 *(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2845 do_bofi_rd8, i ? 0 : repcount, 1);
2847 mutex_exit(&bofi_mutex);
2852 * our rep_getw() routine - use tryenter
/*
 * bofi_rep_rd16() - intercepted ddi_rep_get16(); see bofi_rep_rd8()
 * for the per-element corruption / single-log pattern.
 */
2854 static void
2855 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2856 uint16_t *dev_addr, size_t repcount, uint_t flags)
2858 struct bofi_shadow *hp;
2859 int i;
2860 uint16_t *addr;
2862 hp = handle->ahi_common.ah_bus_private;
2863 BOFI_REP_READ_TESTS(uint16_t)
2864 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2865 hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2866 repcount, flags);
2867 return;
2869 for (i = 0; i < repcount; i++) {
2870 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2871 *(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2872 do_bofi_rd16, i ? 0 : repcount, 2);
2874 mutex_exit(&bofi_mutex);
2879 * our rep_getl() routine - use tryenter
/*
 * bofi_rep_rd32() - intercepted ddi_rep_get32(); see bofi_rep_rd8()
 * for the per-element corruption / single-log pattern.
 */
2881 static void
2882 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2883 uint32_t *dev_addr, size_t repcount, uint_t flags)
2885 struct bofi_shadow *hp;
2886 int i;
2887 uint32_t *addr;
2889 hp = handle->ahi_common.ah_bus_private;
2890 BOFI_REP_READ_TESTS(uint32_t)
2891 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2892 hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2893 repcount, flags);
2894 return;
2896 for (i = 0; i < repcount; i++) {
2897 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2898 *(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2899 do_bofi_rd32, i ? 0 : repcount, 4);
2901 mutex_exit(&bofi_mutex);
2906 * our rep_getll() routine - use tryenter
/*
 * bofi_rep_rd64() - intercepted ddi_rep_get64(); see bofi_rep_rd8()
 * for the per-element corruption / single-log pattern.
 */
2908 static void
2909 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2910 uint64_t *dev_addr, size_t repcount, uint_t flags)
2912 struct bofi_shadow *hp;
2913 int i;
2914 uint64_t *addr;
2916 hp = handle->ahi_common.ah_bus_private;
2917 BOFI_REP_READ_TESTS(uint64_t)
2918 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2919 hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2920 repcount, flags);
2921 return;
2923 for (i = 0; i < repcount; i++) {
2924 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2925 *(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2926 do_bofi_rd64, i ? 0 : repcount, 8);
2928 mutex_exit(&bofi_mutex);
/*
 * BOFI_REP_WRITE_TESTS - prologue for the bofi_rep_wrN routines;
 * mirrors BOFI_REP_READ_TESTS (clamps repcount when only the tail of
 * the run falls outside the mapping).
 */
2931 #define BOFI_REP_WRITE_TESTS(type) \
2932 if (bofi_ddi_check) \
2933 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2934 if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2935 (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2936 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2937 "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2938 (void *)dev_addr, (void *)hp->addr, hp->len); \
2939 if ((caddr_t)dev_addr < hp->addr || \
2940 (caddr_t)dev_addr - hp->addr >= hp->len) \
2941 return; \
2942 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2946 * our rep_putb() routine - use tryenter
/*
 * bofi_rep_wr8() - intercepted ddi_rep_put8().  Each element goes
 * through do_piow_corrupt(); a 0 return suppresses that element's
 * device write.  repcount is passed only on the first iteration so
 * the repeat run is logged once.
 */
2948 static void
2949 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2950 size_t repcount, uint_t flags)
2952 struct bofi_shadow *hp;
2953 int i;
2954 uint64_t llvalue;
2955 uint8_t *addr;
2957 hp = handle->ahi_common.ah_bus_private;
2958 BOFI_REP_WRITE_TESTS(uint8_t)
2959 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2960 hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2961 repcount, flags);
2962 return;
2964 for (i = 0; i < repcount; i++) {
2965 llvalue = *(host_addr + i);
2966 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2967 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2968 repcount))
2969 hp->save.acc.ahi_put8(&hp->save.acc, addr,
2970 (uint8_t)llvalue);
2972 mutex_exit(&bofi_mutex);
2977 * our rep_putw() routine - use tryenter
/*
 * bofi_rep_wr16() - intercepted ddi_rep_put16(); see bofi_rep_wr8()
 * for the per-element corruption / suppression pattern.
 */
2979 static void
2980 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2981 uint16_t *dev_addr, size_t repcount, uint_t flags)
2983 struct bofi_shadow *hp;
2984 int i;
2985 uint64_t llvalue;
2986 uint16_t *addr;
2988 hp = handle->ahi_common.ah_bus_private;
2989 BOFI_REP_WRITE_TESTS(uint16_t)
2990 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2991 hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
2992 repcount, flags);
2993 return;
2995 for (i = 0; i < repcount; i++) {
2996 llvalue = *(host_addr + i);
2997 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2998 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
2999 repcount))
3000 hp->save.acc.ahi_put16(&hp->save.acc, addr,
3001 (uint16_t)llvalue);
3003 mutex_exit(&bofi_mutex);
3008 * our rep_putl() routine - use tryenter
/*
 * bofi_rep_wr32() - intercepted ddi_rep_put32(); see bofi_rep_wr8()
 * for the per-element corruption / suppression pattern.
 */
3010 static void
3011 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
3012 uint32_t *dev_addr, size_t repcount, uint_t flags)
3014 struct bofi_shadow *hp;
3015 int i;
3016 uint64_t llvalue;
3017 uint32_t *addr;
3019 hp = handle->ahi_common.ah_bus_private;
3020 BOFI_REP_WRITE_TESTS(uint32_t)
3021 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3022 hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
3023 repcount, flags);
3024 return;
3026 for (i = 0; i < repcount; i++) {
3027 llvalue = *(host_addr + i);
3028 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3029 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
3030 repcount))
3031 hp->save.acc.ahi_put32(&hp->save.acc, addr,
3032 (uint32_t)llvalue);
3034 mutex_exit(&bofi_mutex);
3039 * our rep_putll() routine - use tryenter
/*
 * bofi_rep_wr64() - intercepted ddi_rep_put64(); see bofi_rep_wr8()
 * for the per-element corruption / suppression pattern.
 */
3041 static void
3042 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
3043 uint64_t *dev_addr, size_t repcount, uint_t flags)
3045 struct bofi_shadow *hp;
3046 int i;
3047 uint64_t llvalue;
3048 uint64_t *addr;
3050 hp = handle->ahi_common.ah_bus_private;
3051 BOFI_REP_WRITE_TESTS(uint64_t)
3052 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3053 hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
3054 repcount, flags);
3055 return;
3057 for (i = 0; i < repcount; i++) {
3058 llvalue = *(host_addr + i);
3059 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3060 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3061 repcount))
3062 hp->save.acc.ahi_put64(&hp->save.acc, addr,
3063 (uint64_t)llvalue);
3065 mutex_exit(&bofi_mutex);
3070 * our ddi_map routine
/*
 * bofi_map() - intercepted bus_map entry point.
 * DDI_MO_MAP_LOCKED: let the real nexus map the registers, then (for
 * drivers under test) allocate a shadow handle, swap all access-handle
 * function pointers for the bofi_* interceptors, hash the shadow onto
 * the dhash/hhash/inuse lists and link any matching pre-existing errdefs.
 * DDI_MO_UNMAP: undo all of the above and free the shadow.
 * All other ops fall through to the saved bus_map at the bottom.
 */
3072 static int
3073 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3074 ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3076 ddi_acc_impl_t *ap;
3077 struct bofi_shadow *hp;
3078 struct bofi_errent *ep;
3079 struct bofi_link *lp, *next_lp;
3080 int retval;
3081 struct bofi_shadow *dhashp;
3082 struct bofi_shadow *hhashp;
3084 switch (reqp->map_op) {
3085 case DDI_MO_MAP_LOCKED:
3087 * for this case get nexus to do real work first
3089 retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3090 vaddrp);
3091 if (retval != DDI_SUCCESS)
3092 return (retval);
3094 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3095 if (ap == NULL)
3096 return (DDI_SUCCESS);
3098 * if driver_list is set, only intercept those drivers
3100 if (!driver_under_test(ap->ahi_common.ah_dip))
3101 return (DDI_SUCCESS);
3104 * support for ddi_regs_map_setup()
3105 * - allocate shadow handle structure and fill it in
3107 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3108 (void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3109 NAMESIZE);
3110 hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3111 hp->dip = ap->ahi_common.ah_dip;
3112 hp->addr = *vaddrp;
3114 * return spurious value to catch direct access to registers
3116 if (bofi_ddi_check)
3117 *vaddrp = (caddr_t)64;
3118 hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3119 hp->offset = offset;
/* len 0 from the framework means "to the end"; clamp to INT_MAX */
3120 if (len == 0)
3121 hp->len = INT_MAX - offset;
3122 else
3123 hp->len = min(len, INT_MAX - offset);
3124 hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3125 hp->link = NULL;
3126 hp->type = BOFI_ACC_HDL;
3128 * save existing function pointers and plug in our own
3130 hp->save.acc = *ap;
3131 ap->ahi_get8 = bofi_rd8;
3132 ap->ahi_get16 = bofi_rd16;
3133 ap->ahi_get32 = bofi_rd32;
3134 ap->ahi_get64 = bofi_rd64;
3135 ap->ahi_put8 = bofi_wr8;
3136 ap->ahi_put16 = bofi_wr16;
3137 ap->ahi_put32 = bofi_wr32;
3138 ap->ahi_put64 = bofi_wr64;
3139 ap->ahi_rep_get8 = bofi_rep_rd8;
3140 ap->ahi_rep_get16 = bofi_rep_rd16;
3141 ap->ahi_rep_get32 = bofi_rep_rd32;
3142 ap->ahi_rep_get64 = bofi_rep_rd64;
3143 ap->ahi_rep_put8 = bofi_rep_wr8;
3144 ap->ahi_rep_put16 = bofi_rep_wr16;
3145 ap->ahi_rep_put32 = bofi_rep_wr32;
3146 ap->ahi_rep_put64 = bofi_rep_wr64;
3147 ap->ahi_fault_check = bofi_check_acc_hdl;
/* force access through the ahi_* vectors, never direct load/store */
3148 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3150 * stick in a pointer to our shadow handle
3152 ap->ahi_common.ah_bus_private = hp;
3154 * add to dhash, hhash and inuse lists
3156 mutex_enter(&bofi_low_mutex);
3157 mutex_enter(&bofi_mutex);
3158 hp->next = shadow_list.next;
3159 shadow_list.next->prev = hp;
3160 hp->prev = &shadow_list;
3161 shadow_list.next = hp;
3162 hhashp = HDL_HHASH(ap);
3163 hp->hnext = hhashp->hnext;
3164 hhashp->hnext->hprev = hp;
3165 hp->hprev = hhashp;
3166 hhashp->hnext = hp;
3167 dhashp = HDL_DHASH(hp->dip);
3168 hp->dnext = dhashp->dnext;
3169 dhashp->dnext->dprev = hp;
3170 hp->dprev = dhashp;
3171 dhashp->dnext = hp;
3173 * chain on any pre-existing errdefs that apply to this
3174 * acc_handle
3176 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3177 if (ddi_name_to_major(hp->name) ==
3178 ddi_name_to_major(ep->name) &&
3179 hp->instance == ep->errdef.instance &&
3180 (ep->errdef.access_type & BOFI_PIO_RW) &&
3181 (ep->errdef.rnumber == -1 ||
3182 hp->rnumber == ep->errdef.rnumber) &&
3183 (ep->errdef.len == 0 ||
3184 offset < ep->errdef.offset + ep->errdef.len) &&
3185 offset + hp->len > ep->errdef.offset) {
/* NOTE(review): silently skipped if the link freelist is empty */
3186 lp = bofi_link_freelist;
3187 if (lp != NULL) {
3188 bofi_link_freelist = lp->link;
3189 lp->errentp = ep;
3190 lp->link = hp->link;
3191 hp->link = lp;
3195 mutex_exit(&bofi_mutex);
3196 mutex_exit(&bofi_low_mutex);
3197 return (DDI_SUCCESS);
3198 case DDI_MO_UNMAP:
3200 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3201 if (ap == NULL)
3202 break;
3204 * support for ddi_regs_map_free()
3205 * - check we really have a shadow handle for this one
3207 mutex_enter(&bofi_low_mutex);
3208 mutex_enter(&bofi_mutex);
3209 hhashp = HDL_HHASH(ap);
3210 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3211 if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3212 break;
3213 if (hp == hhashp) {
3214 mutex_exit(&bofi_mutex);
3215 mutex_exit(&bofi_low_mutex);
3216 break;
3219 * got a shadow handle - restore original pointers
3221 *ap = hp->save.acc;
3222 *vaddrp = hp->addr;
3224 * remove from dhash, hhash and inuse lists
3226 hp->hnext->hprev = hp->hprev;
3227 hp->hprev->hnext = hp->hnext;
3228 hp->dnext->dprev = hp->dprev;
3229 hp->dprev->dnext = hp->dnext;
3230 hp->next->prev = hp->prev;
3231 hp->prev->next = hp->next;
3233 * free any errdef link structures tagged onto the shadow handle
3235 for (lp = hp->link; lp != NULL; ) {
3236 next_lp = lp->link;
3237 lp->link = bofi_link_freelist;
3238 bofi_link_freelist = lp;
3239 lp = next_lp;
3241 hp->link = NULL;
3242 mutex_exit(&bofi_mutex);
3243 mutex_exit(&bofi_low_mutex);
3245 * finally delete shadow handle
3247 kmem_free(hp, sizeof (struct bofi_shadow));
3248 break;
3249 default:
3250 break;
3252 return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3257 * chain any pre-existing errdefs on to newly created dma handle
3258 * if required call do_dma_corrupt() to corrupt data
/*
 * chain_on_errdefs() - link any pre-existing errdefs that match this
 * newly-created DMA shadow handle, and immediately corrupt write-bound
 * data for active DMA_W errdefs.  Caller must hold bofi_mutex (asserted).
 */
3260 static void
3261 chain_on_errdefs(struct bofi_shadow *hp)
3263 struct bofi_errent *ep;
3264 struct bofi_link *lp;
3266 ASSERT(MUTEX_HELD(&bofi_mutex));
3268 * chain on any pre-existing errdefs that apply to this dma_handle
3270 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3271 if (ddi_name_to_major(hp->name) ==
3272 ddi_name_to_major(ep->name) &&
3273 hp->instance == ep->errdef.instance &&
3274 (ep->errdef.rnumber == -1 ||
3275 hp->rnumber == ep->errdef.rnumber) &&
/* range test: errdef must cover at least one aligned 64-bit word */
3276 ((ep->errdef.access_type & BOFI_DMA_RW) &&
3277 (((uintptr_t)(hp->addr + ep->errdef.offset +
3278 ep->errdef.len) & ~LLSZMASK) >
3279 ((uintptr_t)((hp->addr + ep->errdef.offset) +
3280 LLSZMASK) & ~LLSZMASK)))) {
3282 * got a match - link it on
3284 lp = bofi_link_freelist;
3285 if (lp != NULL) {
3286 bofi_link_freelist = lp->link;
3287 lp->errentp = ep;
3288 lp->link = hp->link;
3289 hp->link = lp;
3290 if ((ep->errdef.access_type & BOFI_DMA_W) &&
3291 (hp->flags & DDI_DMA_WRITE) &&
3292 (ep->state & BOFI_DEV_ACTIVE)) {
3293 do_dma_corrupt(hp, ep,
3294 DDI_DMA_SYNC_FORDEV,
3295 0, hp->len);
3304 * need to do copy byte-by-byte in case one of pages is little-endian
/*
 * xbcopy() - byte-at-a-time copy of len bytes from 'from' to 'to';
 * used instead of bcopy() so mixed-endian page mappings copy correctly
 * (per the comment above).  Regions must not overlap destructively.
 */
3306 static void
3307 xbcopy(void *from, void *to, u_longlong_t len)
3309 uchar_t *f = from;
3310 uchar_t *t = to;
3312 while (len--)
3313 *t++ = *f++;
3318 * our ddi_dma_allochdl routine
/*
 * bofi_dma_allochdl() - intercepted bus_dma_allochdl.
 * For drivers under test: allocate a shadow (type BOFI_NULL until bound),
 * have the real nexus allocate the handle, hook dmai_fault_check and the
 * devinfo-cached bind/unbind vectors, assign a unique pseudo-rnumber, and
 * hash the shadow onto the dhash/hhash/inuse lists.
 */
3320 static int
3321 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3322 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3324 int retval = DDI_DMA_NORESOURCES;
3325 struct bofi_shadow *hp, *xhp;
3326 int maxrnumber = 0;
3327 struct bofi_shadow *dhashp;
3328 struct bofi_shadow *hhashp;
3329 ddi_dma_impl_t *mp;
3332 * if driver_list is set, only intercept those drivers
3334 if (!driver_under_test(rdip))
3335 return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3336 waitfp, arg, handlep));
3339 * allocate shadow handle structure and fill it in
3341 hp = kmem_zalloc(sizeof (struct bofi_shadow),
3342 ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3343 if (hp == NULL) {
3345 * what to do here? Wait a bit and try again
3347 if (waitfp != DDI_DMA_DONTWAIT)
3348 (void) timeout((void (*)())waitfp, arg, 10);
3349 return (retval);
3351 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3352 hp->instance = ddi_get_instance(rdip);
3353 hp->dip = rdip;
3354 hp->link = NULL;
3355 hp->type = BOFI_NULL;
3357 * call nexus to do the real work
3359 retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3360 handlep);
3361 if (retval != DDI_SUCCESS) {
3362 kmem_free(hp, sizeof (struct bofi_shadow));
3363 return (retval);
3366 * now point set dma_handle to point to real handle
3368 hp->hdl.dma_handle = *handlep;
3369 mp = (ddi_dma_impl_t *)*handlep;
3370 mp->dmai_fault_check = bofi_check_dma_hdl;
3372 * bind and unbind are cached in devinfo - must overwrite them
3373 * - note that our bind and unbind are quite happy dealing with
3374 * any handles for this devinfo that were previously allocated
3376 if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3377 DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3378 if (save_bus_ops.bus_dma_unbindhdl ==
3379 DEVI(rdip)->devi_bus_dma_unbindfunc)
3380 DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3381 mutex_enter(&bofi_low_mutex);
3382 mutex_enter(&bofi_mutex);
3384 * get an "rnumber" for this handle - really just seeking to
3385 * get a unique number - generally only care for early allocated
3386 * handles - so we get as far as INT_MAX, just stay there
3388 dhashp = HDL_DHASH(hp->dip);
3389 for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3390 if (ddi_name_to_major(xhp->name) ==
3391 ddi_name_to_major(hp->name) &&
3392 xhp->instance == hp->instance &&
3393 (xhp->type == BOFI_DMA_HDL ||
3394 xhp->type == BOFI_NULL))
3395 if (xhp->rnumber >= maxrnumber) {
3396 if (xhp->rnumber == INT_MAX)
3397 maxrnumber = INT_MAX;
3398 else
3399 maxrnumber = xhp->rnumber + 1;
3401 hp->rnumber = maxrnumber;
3403 * add to dhash, hhash and inuse lists
3405 hp->next = shadow_list.next;
3406 shadow_list.next->prev = hp;
3407 hp->prev = &shadow_list;
3408 shadow_list.next = hp;
3409 hhashp = HDL_HHASH(*handlep);
3410 hp->hnext = hhashp->hnext;
3411 hhashp->hnext->hprev = hp;
3412 hp->hprev = hhashp;
3413 hhashp->hnext = hp;
3414 dhashp = HDL_DHASH(hp->dip);
3415 hp->dnext = dhashp->dnext;
3416 dhashp->dnext->dprev = hp;
3417 hp->dprev = dhashp;
3418 dhashp->dnext = hp;
3419 mutex_exit(&bofi_mutex);
3420 mutex_exit(&bofi_low_mutex);
3421 return (retval);
3426 * our ddi_dma_freehdl routine
/*
 * bofi_dma_freehdl() - intercepted bus_dma_freehdl.
 * Looks up the shadow for the handle, lets the real nexus free it, then
 * (if a shadow existed) panics if the handle was still bound, unlinks
 * the shadow from the dhash/hhash/inuse lists and frees it.
 */
3428 static int
3429 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3431 int retval;
3432 struct bofi_shadow *hp;
3433 struct bofi_shadow *hhashp;
3436 * find shadow for this handle
3438 mutex_enter(&bofi_low_mutex);
3439 mutex_enter(&bofi_mutex);
3440 hhashp = HDL_HHASH(handle);
3441 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3442 if (hp->hdl.dma_handle == handle)
3443 break;
3444 mutex_exit(&bofi_mutex);
3445 mutex_exit(&bofi_low_mutex);
3447 * call nexus to do the real work
3449 retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3450 if (retval != DDI_SUCCESS) {
3451 return (retval);
3454 * did we really have a shadow for this handle
3456 if (hp == hhashp)
3457 return (retval);
3459 * yes we have - see if it's still bound
3461 mutex_enter(&bofi_low_mutex);
3462 mutex_enter(&bofi_mutex);
/* BOFI_NULL means unbound; anything else is a driver bug */
3463 if (hp->type != BOFI_NULL)
3464 panic("driver freeing bound dma_handle");
3466 * remove from dhash, hhash and inuse lists
3468 hp->hnext->hprev = hp->hprev;
3469 hp->hprev->hnext = hp->hnext;
3470 hp->dnext->dprev = hp->dprev;
3471 hp->dprev->dnext = hp->dnext;
3472 hp->next->prev = hp->prev;
3473 hp->prev->next = hp->next;
3474 mutex_exit(&bofi_mutex);
3475 mutex_exit(&bofi_low_mutex);
3477 kmem_free(hp, sizeof (struct bofi_shadow));
3478 return (retval);
3483 * our ddi_dma_bindhdl routine
3485 static int
3486 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3487 ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3488 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3490 int retval = DDI_DMA_NORESOURCES;
3491 auto struct ddi_dma_req dmareq;
3492 struct bofi_shadow *hp;
3493 struct bofi_shadow *hhashp;
3494 ddi_dma_impl_t *mp;
3495 unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3498 * check we really have a shadow for this handle
3500 mutex_enter(&bofi_low_mutex);
3501 mutex_enter(&bofi_mutex);
3502 hhashp = HDL_HHASH(handle);
3503 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3504 if (hp->hdl.dma_handle == handle)
3505 break;
3506 mutex_exit(&bofi_mutex);
3507 mutex_exit(&bofi_low_mutex);
3508 if (hp == hhashp) {
3510 * no we don't - just call nexus to do the real work
3512 return save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3513 cookiep, ccountp);
3516 * yes we have - see if it's already bound
3518 if (hp->type != BOFI_NULL)
3519 return (DDI_DMA_INUSE);
3521 hp->flags = dmareqp->dmar_flags;
3522 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3523 hp->map_flags = B_PAGEIO;
3524 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
3525 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
3526 hp->map_flags = B_SHADOW;
3527 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
3528 } else {
3529 hp->map_flags = 0;
3532 * get a kernel virtual mapping
3534 hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3535 if (hp->addr == NULL)
3536 goto error;
3537 if (bofi_sync_check) {
3539 * Take a copy and pass pointers to this up to nexus instead.
3540 * Data will be copied from the original on explicit
3541 * and implicit ddi_dma_sync()
3543 * - maintain page alignment because some devices assume it.
3545 hp->origaddr = hp->addr;
3546 hp->allocaddr = ddi_umem_alloc(
3547 ((uintptr_t)hp->addr & pagemask) + hp->len,
3548 (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3549 &hp->umem_cookie);
3550 if (hp->allocaddr == NULL)
3551 goto error;
3552 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3553 if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3554 xbcopy(hp->origaddr, hp->addr, hp->len);
3555 dmareq = *dmareqp;
3556 dmareq.dmar_object.dmao_size = hp->len;
3557 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3558 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3559 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3560 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3561 dmareqp = &dmareq;
3564 * call nexus to do the real work
3566 retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3567 cookiep, ccountp);
3568 if (retval != DDI_SUCCESS)
3569 goto error2;
3571 * unset DMP_NOSYNC
3573 mp = (ddi_dma_impl_t *)handle;
3574 mp->dmai_rflags &= ~DMP_NOSYNC;
3576 * chain on any pre-existing errdefs that apply to this
3577 * acc_handle and corrupt if required (as there is an implicit
3578 * ddi_dma_sync() in this call)
3580 mutex_enter(&bofi_low_mutex);
3581 mutex_enter(&bofi_mutex);
3582 hp->type = BOFI_DMA_HDL;
3583 chain_on_errdefs(hp);
3584 mutex_exit(&bofi_mutex);
3585 mutex_exit(&bofi_low_mutex);
3586 return (retval);
3588 error:
3589 if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3591 * what to do here? Wait a bit and try again
3593 (void) timeout((void (*)())dmareqp->dmar_fp,
3594 dmareqp->dmar_arg, 10);
3596 error2:
3597 if (hp) {
3598 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3599 hp->map_pp, hp->map_pplist);
3600 if (bofi_sync_check && hp->allocaddr)
3601 ddi_umem_free(hp->umem_cookie);
3602 hp->mapaddr = NULL;
3603 hp->allocaddr = NULL;
3604 hp->origaddr = NULL;
3606 return (retval);
3611 * our ddi_dma_unbindhdl routine
3613 static int
3614 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3616 struct bofi_link *lp, *next_lp;
3617 struct bofi_errent *ep;
3618 int retval;
3619 struct bofi_shadow *hp;
3620 struct bofi_shadow *hhashp;
3623 * call nexus to do the real work
3625 retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
3626 if (retval != DDI_SUCCESS)
3627 return (retval);
3629 * check we really have a shadow for this handle
3631 mutex_enter(&bofi_low_mutex);
3632 mutex_enter(&bofi_mutex);
3633 hhashp = HDL_HHASH(handle);
3634 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3635 if (hp->hdl.dma_handle == handle)
3636 break;
3637 if (hp == hhashp) {
3638 mutex_exit(&bofi_mutex);
3639 mutex_exit(&bofi_low_mutex);
3640 return (retval);
3643 * yes we have - see if it's already unbound
3645 if (hp->type == BOFI_NULL)
3646 panic("driver unbinding unbound dma_handle");
3648 * free any errdef link structures tagged on to this
3649 * shadow handle
3651 for (lp = hp->link; lp != NULL; ) {
3652 next_lp = lp->link;
3654 * there is an implicit sync_for_cpu on free -
3655 * may need to corrupt
3657 ep = lp->errentp;
3658 if ((ep->errdef.access_type & BOFI_DMA_R) &&
3659 (hp->flags & DDI_DMA_READ) &&
3660 (ep->state & BOFI_DEV_ACTIVE)) {
3661 do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
3663 lp->link = bofi_link_freelist;
3664 bofi_link_freelist = lp;
3665 lp = next_lp;
3667 hp->link = NULL;
3668 hp->type = BOFI_NULL;
3669 mutex_exit(&bofi_mutex);
3670 mutex_exit(&bofi_low_mutex);
3672 if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
3674 * implicit sync_for_cpu - copy data back
3676 if (hp->allocaddr)
3677 xbcopy(hp->addr, hp->origaddr, hp->len);
3678 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3679 hp->map_pp, hp->map_pplist);
3680 if (bofi_sync_check && hp->allocaddr)
3681 ddi_umem_free(hp->umem_cookie);
3682 hp->mapaddr = NULL;
3683 hp->allocaddr = NULL;
3684 hp->origaddr = NULL;
3685 return (retval);
3690 * our ddi_dma_sync routine
3692 static int
3693 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3694 ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3696 struct bofi_link *lp;
3697 struct bofi_errent *ep;
3698 struct bofi_shadow *hp;
3699 struct bofi_shadow *hhashp;
3700 int retval;
3702 if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3704 * in this case get nexus driver to do sync first
3706 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3707 len, flags);
3708 if (retval != DDI_SUCCESS)
3709 return (retval);
3712 * check we really have a shadow for this handle
3714 mutex_enter(&bofi_low_mutex);
3715 mutex_enter(&bofi_mutex);
3716 hhashp = HDL_HHASH(handle);
3717 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3718 if (hp->hdl.dma_handle == handle &&
3719 hp->type == BOFI_DMA_HDL)
3720 break;
3721 mutex_exit(&bofi_mutex);
3722 mutex_exit(&bofi_low_mutex);
3723 if (hp != hhashp) {
3725 * yes - do we need to copy data from original
3727 if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3728 if (hp->allocaddr)
3729 xbcopy(hp->origaddr+off, hp->addr+off,
3730 len ? len : (hp->len - off));
3732 * yes - check if we need to corrupt the data
3734 mutex_enter(&bofi_low_mutex);
3735 mutex_enter(&bofi_mutex);
3736 for (lp = hp->link; lp != NULL; lp = lp->link) {
3737 ep = lp->errentp;
3738 if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3739 (flags == DDI_DMA_SYNC_FORCPU ||
3740 flags == DDI_DMA_SYNC_FORKERNEL)) ||
3741 ((ep->errdef.access_type & BOFI_DMA_W) &&
3742 (flags == DDI_DMA_SYNC_FORDEV))) &&
3743 (ep->state & BOFI_DEV_ACTIVE)) {
3744 do_dma_corrupt(hp, ep, flags, off,
3745 len ? len : (hp->len - off));
3748 mutex_exit(&bofi_mutex);
3749 mutex_exit(&bofi_low_mutex);
3751 * do we need to copy data to original
3753 if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3754 flags == DDI_DMA_SYNC_FORKERNEL))
3755 if (hp->allocaddr)
3756 xbcopy(hp->addr+off, hp->origaddr+off,
3757 len ? len : (hp->len - off));
3759 if (flags == DDI_DMA_SYNC_FORDEV)
3761 * in this case get nexus driver to do sync last
3763 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3764 len, flags);
3765 return (retval);
3770 * our dma_win routine
3772 static int
3773 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3774 ddi_dma_handle_t handle, uint_t win, off_t *offp,
3775 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3777 struct bofi_shadow *hp;
3778 struct bofi_shadow *hhashp;
3779 int retval;
3780 ddi_dma_impl_t *mp;
3783 * call nexus to do the real work
3785 retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3786 cookiep, ccountp);
3787 if (retval != DDI_SUCCESS)
3788 return (retval);
3790 * check we really have a shadow for this handle
3792 mutex_enter(&bofi_low_mutex);
3793 mutex_enter(&bofi_mutex);
3794 hhashp = HDL_HHASH(handle);
3795 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3796 if (hp->hdl.dma_handle == handle)
3797 break;
3798 if (hp != hhashp) {
3800 * yes - make sure DMP_NOSYNC is unset
3802 mp = (ddi_dma_impl_t *)handle;
3803 mp->dmai_rflags &= ~DMP_NOSYNC;
3805 mutex_exit(&bofi_mutex);
3806 mutex_exit(&bofi_low_mutex);
3807 return (retval);
3812 * our dma_ctl routine
3814 static int
3815 bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
3816 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
3817 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
3819 struct bofi_shadow *hp;
3820 struct bofi_shadow *hhashp;
3821 int retval;
3822 int i;
3823 struct bofi_shadow *dummyhp;
3826 * get nexus to do real work
3828 retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
3829 lenp, objp, flags);
3830 if (retval != DDI_SUCCESS)
3831 return (retval);
3833 * if driver_list is set, only intercept those drivers
3835 if (!driver_under_test(rdip))
3836 return (DDI_SUCCESS);
3839 * check we really have a shadow for this handle
3841 mutex_enter(&bofi_low_mutex);
3842 mutex_enter(&bofi_mutex);
3843 hhashp = HDL_HHASH(handle);
3844 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3845 if (hp->hdl.dma_handle == handle)
3846 break;
3847 if (hp == hhashp) {
3848 mutex_exit(&bofi_mutex);
3849 mutex_exit(&bofi_low_mutex);
3850 return (retval);
3853 * yes we have - see what kind of command this is
3855 switch (request) {
3856 case DDI_DMA_RELEASE:
3858 * dvma release - release dummy handle and all the index handles
3860 dummyhp = hp;
3861 dummyhp->hnext->hprev = dummyhp->hprev;
3862 dummyhp->hprev->hnext = dummyhp->hnext;
3863 mutex_exit(&bofi_mutex);
3864 mutex_exit(&bofi_low_mutex);
3865 for (i = 0; i < dummyhp->len; i++) {
3866 hp = dummyhp->hparrayp[i];
3868 * chek none of the index handles were still loaded
3870 if (hp->type != BOFI_NULL)
3871 panic("driver releasing loaded dvma");
3873 * remove from dhash and inuse lists
3875 mutex_enter(&bofi_low_mutex);
3876 mutex_enter(&bofi_mutex);
3877 hp->dnext->dprev = hp->dprev;
3878 hp->dprev->dnext = hp->dnext;
3879 hp->next->prev = hp->prev;
3880 hp->prev->next = hp->next;
3881 mutex_exit(&bofi_mutex);
3882 mutex_exit(&bofi_low_mutex);
3884 if (bofi_sync_check && hp->allocaddr)
3885 ddi_umem_free(hp->umem_cookie);
3886 kmem_free(hp, sizeof (struct bofi_shadow));
3888 kmem_free(dummyhp->hparrayp, dummyhp->len *
3889 sizeof (struct bofi_shadow *));
3890 kmem_free(dummyhp, sizeof (struct bofi_shadow));
3891 return (retval);
3892 default:
3893 break;
3895 mutex_exit(&bofi_mutex);
3896 mutex_exit(&bofi_low_mutex);
3897 return (retval);
3901 * bofi intercept routine - gets called instead of users interrupt routine
3903 static uint_t
3904 bofi_intercept_intr(caddr_t xp, caddr_t arg2)
3906 struct bofi_errent *ep;
3907 struct bofi_link *lp;
3908 struct bofi_shadow *hp;
3909 int intr_count = 1;
3910 int i;
3911 uint_t retval = DDI_INTR_UNCLAIMED;
3912 uint_t result;
3913 int unclaimed_counter = 0;
3914 int jabber_detected = 0;
3916 hp = (struct bofi_shadow *)xp;
3918 * check if nothing to do
3920 if (hp->link == NULL)
3921 return (hp->save.intr.int_handler
3922 (hp->save.intr.int_handler_arg1, arg2));
3923 mutex_enter(&bofi_mutex);
3925 * look for any errdefs
3927 for (lp = hp->link; lp != NULL; lp = lp->link) {
3928 ep = lp->errentp;
3929 if (ep->state & BOFI_DEV_ACTIVE) {
3931 * got one
3933 if ((ep->errdef.access_count ||
3934 ep->errdef.fail_count) &&
3935 (ep->errdef.access_type & BOFI_LOG))
3936 log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
3937 if (ep->errdef.access_count > 1) {
3938 ep->errdef.access_count--;
3939 } else if (ep->errdef.fail_count > 0) {
3940 ep->errdef.fail_count--;
3941 ep->errdef.access_count = 0;
3943 * OK do "corruption"
3945 if (ep->errstate.fail_time == 0)
3946 ep->errstate.fail_time = bofi_gettime();
3947 switch (ep->errdef.optype) {
3948 case BOFI_DELAY_INTR:
3949 if (!hp->hilevel) {
3950 drv_usecwait
3951 (ep->errdef.operand);
3953 break;
3954 case BOFI_LOSE_INTR:
3955 intr_count = 0;
3956 break;
3957 case BOFI_EXTRA_INTR:
3958 intr_count += ep->errdef.operand;
3959 break;
3960 default:
3961 break;
3966 mutex_exit(&bofi_mutex);
3968 * send extra or fewer interrupts as requested
3970 for (i = 0; i < intr_count; i++) {
3971 result = hp->save.intr.int_handler
3972 (hp->save.intr.int_handler_arg1, arg2);
3973 if (result == DDI_INTR_CLAIMED)
3974 unclaimed_counter >>= 1;
3975 else if (++unclaimed_counter >= 20)
3976 jabber_detected = 1;
3977 if (i == 0)
3978 retval = result;
3981 * if more than 1000 spurious interrupts requested and
3982 * jabber not detected - give warning
3984 if (intr_count > 1000 && !jabber_detected)
3985 panic("undetected interrupt jabber: %s%d",
3986 hp->name, hp->instance);
3988 * return first response - or "unclaimed" if none
3990 return (retval);
3995 * our ddi_check_acc_hdl
3997 /* ARGSUSED */
3998 static int
3999 bofi_check_acc_hdl(ddi_acc_impl_t *handle)
4001 struct bofi_shadow *hp;
4002 struct bofi_link *lp;
4003 uint_t result = 0;
4005 hp = handle->ahi_common.ah_bus_private;
4006 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
4007 return (0);
4009 for (lp = hp->link; lp != NULL; lp = lp->link) {
4011 * OR in error state from all associated
4012 * errdef structures
4014 if (lp->errentp->errdef.access_count == 0 &&
4015 (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4016 result = (lp->errentp->errdef.acc_chk & 1);
4019 mutex_exit(&bofi_mutex);
4020 return (result);
4024 * our ddi_check_dma_hdl
4026 /* ARGSUSED */
4027 static int
4028 bofi_check_dma_hdl(ddi_dma_impl_t *handle)
4030 struct bofi_shadow *hp;
4031 struct bofi_link *lp;
4032 struct bofi_shadow *hhashp;
4033 uint_t result = 0;
4035 if (!mutex_tryenter(&bofi_mutex)) {
4036 return (0);
4038 hhashp = HDL_HHASH(handle);
4039 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4040 if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
4041 break;
4042 if (hp == hhashp) {
4043 mutex_exit(&bofi_mutex);
4044 return (0);
4046 if (!hp->link) {
4047 mutex_exit(&bofi_mutex);
4048 return (0);
4050 for (lp = hp->link; lp != NULL; lp = lp->link) {
4052 * OR in error state from all associated
4053 * errdef structures
4055 if (lp->errentp->errdef.access_count == 0 &&
4056 (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4057 result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
4060 mutex_exit(&bofi_mutex);
4061 return (result);
4065 /* ARGSUSED */
4066 static int
4067 bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
4068 ddi_eventcookie_t eventhdl, void *impl_data)
4070 ddi_eventcookie_t ec;
4071 struct ddi_fault_event_data *arg;
4072 struct bofi_errent *ep;
4073 struct bofi_shadow *hp;
4074 struct bofi_shadow *dhashp;
4075 struct bofi_link *lp;
4077 ASSERT(eventhdl);
4078 if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
4079 return (DDI_FAILURE);
4081 if (ec != eventhdl)
4082 return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
4083 impl_data));
4085 arg = (struct ddi_fault_event_data *)impl_data;
4086 mutex_enter(&bofi_mutex);
4088 * find shadow handles with appropriate dev_infos
4089 * and set error reported on all associated errdef structures
4091 dhashp = HDL_DHASH(arg->f_dip);
4092 for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
4093 if (hp->dip == arg->f_dip) {
4094 for (lp = hp->link; lp != NULL; lp = lp->link) {
4095 ep = lp->errentp;
4096 ep->errstate.errmsg_count++;
4097 if ((ep->errstate.msg_time == 0 ||
4098 ep->errstate.severity > arg->f_impact) &&
4099 (ep->state & BOFI_DEV_ACTIVE)) {
4100 ep->errstate.msg_time = bofi_gettime();
4101 ep->errstate.severity = arg->f_impact;
4102 (void) strncpy(ep->errstate.buffer,
4103 arg->f_message, ERRMSGSIZE);
4104 ddi_trigger_softintr(ep->softintr_id);
4109 mutex_exit(&bofi_mutex);
4110 return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
4113 /*ARGSUSED*/
4114 static int
4115 bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
4117 char *class = "";
4118 char *path = "";
4119 char *ptr;
4120 nvlist_t *nvlist;
4121 nvlist_t *detector;
4122 ddi_fault_impact_t impact;
4123 struct bofi_errent *ep;
4124 struct bofi_shadow *hp;
4125 struct bofi_link *lp;
4126 char service_class[FM_MAX_CLASS];
4127 char hppath[MAXPATHLEN];
4128 int service_ereport = 0;
4130 (void) sysevent_get_attr_list(ev, &nvlist);
4131 (void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
4132 if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
4133 (void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);
4135 (void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
4136 FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
4137 if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
4138 service_ereport = 1;
4140 mutex_enter(&bofi_mutex);
4142 * find shadow handles with appropriate dev_infos
4143 * and set error reported on all associated errdef structures
4145 for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
4146 (void) ddi_pathname(hp->dip, hppath);
4147 if (strcmp(path, hppath) != 0)
4148 continue;
4149 for (lp = hp->link; lp != NULL; lp = lp->link) {
4150 ep = lp->errentp;
4151 ep->errstate.errmsg_count++;
4152 if (!(ep->state & BOFI_DEV_ACTIVE))
4153 continue;
4154 if (ep->errstate.msg_time != 0)
4155 continue;
4156 if (service_ereport) {
4157 ptr = class + strlen(service_class);
4158 if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
4159 impact = DDI_SERVICE_LOST;
4160 else if (strcmp(ptr,
4161 DDI_FM_SERVICE_DEGRADED) == 0)
4162 impact = DDI_SERVICE_DEGRADED;
4163 else if (strcmp(ptr,
4164 DDI_FM_SERVICE_RESTORED) == 0)
4165 impact = DDI_SERVICE_RESTORED;
4166 else
4167 impact = DDI_SERVICE_UNAFFECTED;
4168 if (ep->errstate.severity > impact)
4169 ep->errstate.severity = impact;
4170 } else if (ep->errstate.buffer[0] == '\0') {
4171 (void) strncpy(ep->errstate.buffer, class,
4172 ERRMSGSIZE);
4174 if (ep->errstate.buffer[0] != '\0' &&
4175 ep->errstate.severity < DDI_SERVICE_RESTORED) {
4176 ep->errstate.msg_time = bofi_gettime();
4177 ddi_trigger_softintr(ep->softintr_id);
4181 nvlist_free(nvlist);
4182 mutex_exit(&bofi_mutex);
4183 return (0);
4187 * our intr_ops routine
4189 static int
4190 bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
4191 ddi_intr_handle_impl_t *hdlp, void *result)
4193 int retval;
4194 struct bofi_shadow *hp;
4195 struct bofi_shadow *dhashp;
4196 struct bofi_shadow *hhashp;
4197 struct bofi_errent *ep;
4198 struct bofi_link *lp, *next_lp;
4200 switch (intr_op) {
4201 case DDI_INTROP_ADDISR:
4203 * if driver_list is set, only intercept those drivers
4205 if (!driver_under_test(rdip))
4206 return (save_bus_ops.bus_intr_op(dip, rdip,
4207 intr_op, hdlp, result));
4209 * allocate shadow handle structure and fill in
4211 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
4212 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4213 hp->instance = ddi_get_instance(rdip);
4214 hp->save.intr.int_handler = hdlp->ih_cb_func;
4215 hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
4216 hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
4217 hdlp->ih_cb_arg1 = (caddr_t)hp;
4218 hp->bofi_inum = hdlp->ih_inum;
4219 hp->dip = rdip;
4220 hp->link = NULL;
4221 hp->type = BOFI_INT_HDL;
4223 * save whether hilevel or not
4226 if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
4227 hp->hilevel = 1;
4228 else
4229 hp->hilevel = 0;
4232 * call nexus to do real work, but specifying our handler, and
4233 * our shadow handle as argument
4235 retval = save_bus_ops.bus_intr_op(dip, rdip,
4236 intr_op, hdlp, result);
4237 if (retval != DDI_SUCCESS) {
4238 kmem_free(hp, sizeof (struct bofi_shadow));
4239 return (retval);
4242 * add to dhash, hhash and inuse lists
4244 mutex_enter(&bofi_low_mutex);
4245 mutex_enter(&bofi_mutex);
4246 hp->next = shadow_list.next;
4247 shadow_list.next->prev = hp;
4248 hp->prev = &shadow_list;
4249 shadow_list.next = hp;
4250 hhashp = HDL_HHASH(hdlp->ih_inum);
4251 hp->hnext = hhashp->hnext;
4252 hhashp->hnext->hprev = hp;
4253 hp->hprev = hhashp;
4254 hhashp->hnext = hp;
4255 dhashp = HDL_DHASH(hp->dip);
4256 hp->dnext = dhashp->dnext;
4257 dhashp->dnext->dprev = hp;
4258 hp->dprev = dhashp;
4259 dhashp->dnext = hp;
4261 * chain on any pre-existing errdefs that apply to this
4262 * acc_handle
4264 for (ep = errent_listp; ep != NULL; ep = ep->next) {
4265 if (ddi_name_to_major(hp->name) ==
4266 ddi_name_to_major(ep->name) &&
4267 hp->instance == ep->errdef.instance &&
4268 (ep->errdef.access_type & BOFI_INTR)) {
4269 lp = bofi_link_freelist;
4270 if (lp != NULL) {
4271 bofi_link_freelist = lp->link;
4272 lp->errentp = ep;
4273 lp->link = hp->link;
4274 hp->link = lp;
4278 mutex_exit(&bofi_mutex);
4279 mutex_exit(&bofi_low_mutex);
4280 return (retval);
4281 case DDI_INTROP_REMISR:
4283 * call nexus routine first
4285 retval = save_bus_ops.bus_intr_op(dip, rdip,
4286 intr_op, hdlp, result);
4288 * find shadow handle
4290 mutex_enter(&bofi_low_mutex);
4291 mutex_enter(&bofi_mutex);
4292 hhashp = HDL_HHASH(hdlp->ih_inum);
4293 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
4294 if (hp->dip == rdip &&
4295 hp->type == BOFI_INT_HDL &&
4296 hp->bofi_inum == hdlp->ih_inum) {
4297 break;
4300 if (hp == hhashp) {
4301 mutex_exit(&bofi_mutex);
4302 mutex_exit(&bofi_low_mutex);
4303 return (retval);
4306 * found one - remove from dhash, hhash and inuse lists
4308 hp->hnext->hprev = hp->hprev;
4309 hp->hprev->hnext = hp->hnext;
4310 hp->dnext->dprev = hp->dprev;
4311 hp->dprev->dnext = hp->dnext;
4312 hp->next->prev = hp->prev;
4313 hp->prev->next = hp->next;
4315 * free any errdef link structures
4316 * tagged on to this shadow handle
4318 for (lp = hp->link; lp != NULL; ) {
4319 next_lp = lp->link;
4320 lp->link = bofi_link_freelist;
4321 bofi_link_freelist = lp;
4322 lp = next_lp;
4324 hp->link = NULL;
4325 mutex_exit(&bofi_mutex);
4326 mutex_exit(&bofi_low_mutex);
4327 kmem_free(hp, sizeof (struct bofi_shadow));
4328 return (retval);
4329 default:
4330 return (save_bus_ops.bus_intr_op(dip, rdip,
4331 intr_op, hdlp, result));