/*	$NetBSD: icp.c,v 1.28 2008/04/28 20:23:50 martin Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */
/*
 * This driver would not have been written if it were not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.28 2008/04/28 20:23:50 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"
int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);
int	icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

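/*
 * Finish attaching the controller: allocate the DMA scratch area and
 * command control blocks, initialize the firmware services, and attach
 * child devices.  Called from the bus-specific front-end once the
 * interrupt has been established (hence intrstr).  Returns 0 on
 * success, non-zero on failure.
 */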
int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;
	int locs[ICPCF_NLOCS];

	state = 0;

	if (intrstr != NULL)
		aprint_normal_dev(&icp->icp_dv, "interrupting at %s\n",
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout, 0);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error_dev(&icp->icp_dv, "cannot create scratch dmamap\n");
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&icp->icp_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error_dev(&icp->icp_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&icp->icp_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if ((icp->icp_ccbs = ic) == NULL) {
		aprint_error_dev(&icp->icp_dv, "malloc() failed\n");
		goto bail_out;
	}
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings, so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error_dev(&icp->icp_dv, "%d/%d CCBs usable\n",
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "screen service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "cache service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error_dev(&icp->icp_dv, "cache service mount error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "cache service post-mount init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(&icp->icp_dv, "raw service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(&icp->icp_dv,
		    "scatter/gather not supported (raw service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(&icp->icp_dv,
		    "scatter/gather not supported (cache service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error_dev(&icp->icp_dv, "unable to retrieve board info\n");
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal_dev(&icp->icp_dv,
	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error_dev(&icp->icp_dv,
					    "unable to get chan info");
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;

			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(&icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;

			locs[ICPCF_UNIT] = j;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(&icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	if (icp_count++ == 0)
		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);

	return (0);

 bail_out:
	if (state > 4)
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
	if (state > 3)
		free(icp->icp_ccbs, M_DEVBUF);
	if (state > 2)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 0)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

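/*
 * Record the service callback for the given unit; used to notify the
 * child device, e.g. when its number of openings changes.
 */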
void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

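/*
 * Rescan one cacheservice host drive: query its current size and type,
 * and attach, detach, or replace the corresponding child device as
 * needed.
 */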
void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and that
	 * no commands be running when this is called.  The ioctl mutex
	 * must also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    device_xname(&icp->icp_dv), unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    device_xname(&icp->icp_dv), unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    device_xname(&icp->icp_dv), unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    device_xname(&icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv,
		    "icp", locs, &icpa, icp_print, config_stdsubmatch);
	}

	icp_recompute_openings(icp);
}

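/*
 * Rescan every host drive slot, detaching drives that are no longer
 * present.
 */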
void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    device_xname(&icp->icp_dv));
		return;
	}
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
			    device_xname(&icp->icp_dv), unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

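/*
 * Redistribute the usable CCBs evenly across all present devices and
 * push the new number of openings to each child.
 */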
void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    device_xname(&icp->icp_dv), icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

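/*
 * Watchdog callout: periodically run the interrupt handler to pick up
 * any completions that may have been missed, restart queue processing,
 * and reschedule ourselves.
 */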
void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

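/*
 * autoconf print routine for our children.
 */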
int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

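/*
 * Handle an asynchronous event posted by the controller outside the
 * context of any command, recording it in the event buffer.
 */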
int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(&icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", device_xname(&icp->icp_dv),
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(&icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

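/*
 * Interrupt handler: fetch the controller status, handle async and
 * service events, and complete the CCB named by the returned command
 * index.
 */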
int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		aprint_error_dev(&icp->icp_dv, "uninitialized or unknown service (%d/%d)\n",
		    ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = device_unit(&icp->icp_dv);
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) > icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", device_xname(&icp->icp_dv));
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

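/*
 * Completion handler for user commands: copy status and any scratch
 * area data back into the caller's gdt_ucmd_t, then release the
 * scratch area for the next user command.
 */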
void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = (void *) ic->ic_dv;
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

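/*
 * Issue a synchronous internal service command and poll for its
 * completion, e.g.:
 *
 *	icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0);
 *
 * Returns true if the controller reported ICP_S_OK.
 */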
/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

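/*
 * Execute a user command handed in through the management ioctl
 * interface, bouncing any data through the controller scratch area.
 */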
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(&icp->icp_dv, "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(&icp->icp_dv, "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			aprint_error_dev(&icp->icp_dv, "scratch buffer too small (%d/%d)\n",
			    GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
		    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		aprint_error_dev(&icp->icp_dv, "error %d waiting for ucmd to complete\n",
		    error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

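/*
 * Allocate a CCB from the free list; returns NULL if none are
 * available.
 */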
struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
	    SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

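/*
 * As icp_ccb_alloc(), but sleep until a CCB becomes available.
 */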
struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

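/*
 * Return a CCB to the free list, waking anybody waiting for one.
 */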
void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

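/*
 * Queue a CCB (or, if ic is NULL, just kick the queues) and submit as
 * many queued CCBs as the controller will accept.  User commands take
 * precedence, but only one may be outstanding at a time since they
 * share the scratch area.
 */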
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	for (; icp->icp_qfreeze == 0;) {
		if (__predict_false((ic =
		    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
		    SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

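/*
 * Map a data transfer for the given CCB, filling in its scatter/gather
 * list and performing the "pre" DMA synchronization.
 */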
int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

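/*
 * Undo icp_ccb_map(): perform the "post" DMA synchronization and
 * unload the transfer map.
 */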
void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

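/*
 * Submit a CCB and wait for it to complete: by busy-waiting during
 * early boot (cold), otherwise by sleeping.
 */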
int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", device_xname(&icp->icp_dv));
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
			    mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    device_xname(&icp->icp_dv), ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		aprint_error_dev(&icp->icp_dv, "command timed out\n");
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

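/*
 * Queue a CCB and sleep until it completes or the timeout (in
 * milliseconds) expires.
 */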
int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		aprint_error_dev(&icp->icp_dv, "command failed; status=%x\n",
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

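/*
 * As icp_ccb_wait(), but for user commands, whose completion is
 * signalled by icp_ucmd_intr().
 */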
int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = &icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

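/*
 * Hand a CCB to the controller through the controller-specific hooks,
 * bumping the count of running commands.
 */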
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

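/*
 * Freeze the command queues and wait for all running commands to
 * drain, e.g. so that the caller can safely rescan host drives.
 */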
int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

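/*
 * Undo one icp_freeze(); restart queue processing once the last
 * freeze is lifted.
 */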
void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

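/*
 * Record an event in the circular event buffer.  A repetition of the
 * most recent event only bumps its repeat count and timestamp.
 */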
gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	    (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	      (char *) evt->event_string) == 0))) {
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

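/*
 * Copy out the event identified by handle (-1 starts at the oldest)
 * and return the handle to use for the next read; -1 is returned once
 * the newest event has been read.
 */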
int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

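/*
 * Copy out the oldest event not yet seen by the given application,
 * marking it as read.
 */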
void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

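/*
 * Discard all stored events.
 */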
void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}