/*	$NetBSD: icp.c,v 1.28 2008/04/28 20:23:50 martin Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.28 2008/04/28 20:23:50 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/endian.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <sys/bswap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"

int     icp_async_event(struct icp_softc *, int);
void    icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void    icp_chain(struct icp_softc *);
int     icp_print(void *, const char *);
void    icp_watchdog(void *);
void    icp_ucmd_intr(struct icp_ccb *);
void    icp_recompute_openings(struct icp_softc *);

int     icp_count;      /* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller.
 */
gdt_statist_t icp_stats;

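/*
 * icp_init:
 *
 *      Initialize the controller: allocate the DMA scratch area and the
 *      CCB pool, initialize the screen, cache and raw SCSI firmware
 *      services, and attach child devices (cache drives and raw SCSI
 *      channels).  Returns zero on success, non-zero on failure.
 */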
int
icp_init(struct icp_softc *icp, const char *intrstr)
{
        struct icp_attach_args icpa;
        struct icp_binfo binfo;
        struct icp_ccb *ic;
        u_int16_t cdev_cnt;
        int i, j, state, feat, nsegs, rv;
        int locs[ICPCF_NLOCS];

        state = 0;

        if (intrstr != NULL)
                aprint_normal_dev(&icp->icp_dv, "interrupting at %s\n",
                    intrstr);

        SIMPLEQ_INIT(&icp->icp_ccb_queue);
        SIMPLEQ_INIT(&icp->icp_ccb_freelist);
        SIMPLEQ_INIT(&icp->icp_ucmd_queue);
        callout_init(&icp->icp_wdog_callout, 0);

        /*
         * Allocate a scratch area.
         */
        if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
            ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
            &icp->icp_scr_dmamap) != 0) {
                aprint_error_dev(&icp->icp_dv,
                    "cannot create scratch dmamap\n");
                return (1);
        }
        state++;

        if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
            icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
                aprint_error_dev(&icp->icp_dv,
                    "cannot alloc scratch dmamem\n");
                goto bail_out;
        }
        state++;

        if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
            ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
                aprint_error_dev(&icp->icp_dv, "cannot map scratch dmamem\n");
                goto bail_out;
        }
        state++;

        if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
            ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
                aprint_error_dev(&icp->icp_dv, "cannot load scratch dmamap\n");
                goto bail_out;
        }
        state++;

        /*
         * Allocate and initialize the command control blocks.
         */
        ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
        if ((icp->icp_ccbs = ic) == NULL) {
                aprint_error_dev(&icp->icp_dv, "malloc() failed\n");
                goto bail_out;
        }
        state++;

        for (i = 0; i < ICP_NCCBS; i++, ic++) {
                /*
                 * The first two command indexes have special meanings, so
                 * we can't use them.
                 */
                ic->ic_ident = i + 2;
                rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
                    ICP_MAXSG, ICP_MAX_XFER, 0,
                    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
                    &ic->ic_xfer_map);
                if (rv != 0)
                        break;
                icp->icp_nccbs++;
                icp_ccb_free(icp, ic);
        }

        if (icp->icp_nccbs != ICP_NCCBS)
                aprint_error_dev(&icp->icp_dv, "%d/%d CCBs usable\n",
                    icp->icp_nccbs, ICP_NCCBS);

        /*
         * Initialize the controller.
         */
        if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
                aprint_error_dev(&icp->icp_dv,
                    "screen service init error %d\n", icp->icp_status);
                goto bail_out;
        }

        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
                aprint_error_dev(&icp->icp_dv,
                    "cache service init error %d\n", icp->icp_status);
                goto bail_out;
        }

        icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
                aprint_error_dev(&icp->icp_dv,
                    "cache service mount error %d\n", icp->icp_status);
                goto bail_out;
        }

        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
                aprint_error_dev(&icp->icp_dv,
                    "cache service post-mount init error %d\n",
                    icp->icp_status);
                goto bail_out;
        }
        cdev_cnt = (u_int16_t)icp->icp_info;
        icp->icp_fw_vers = icp->icp_service;

        if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
                aprint_error_dev(&icp->icp_dv, "raw service init error %d\n",
                    icp->icp_status);
                goto bail_out;
        }

        /*
         * Set/get raw service features (scatter/gather).
         */
        feat = 0;
        if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
            0, 0))
                if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
                        feat = icp->icp_info;

        if ((feat & ICP_SCATTER_GATHER) == 0) {
                aprint_normal_dev(&icp->icp_dv,
                    "scatter/gather not supported (raw service)\n");
        } else
                icp->icp_features |= ICP_FEAT_RAWSERVICE;

        /*
         * Set/get cache service features (scatter/gather).
         */
        feat = 0;
        if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
            ICP_SCATTER_GATHER, 0))
                if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
                        feat = icp->icp_info;

        if ((feat & ICP_SCATTER_GATHER) == 0) {
                aprint_normal_dev(&icp->icp_dv,
                    "scatter/gather not supported (cache service)\n");
        } else
                icp->icp_features |= ICP_FEAT_CACHESERVICE;

        /*
         * Pull some information from the board and dump.
         */
        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
            ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
                aprint_error_dev(&icp->icp_dv,
                    "unable to retrieve board info\n");
                goto bail_out;
        }
        memcpy(&binfo, icp->icp_scr, sizeof(binfo));

        aprint_normal_dev(&icp->icp_dv,
            "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
            binfo.bi_type_string, binfo.bi_raid_string,
            binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

        /*
         * Determine the number of devices, and number of openings per
         * device.
         */
        if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
                for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
                        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
                            0))
                                continue;

                        icp->icp_cdr[j].cd_size = icp->icp_info;
                        if (icp->icp_cdr[j].cd_size != 0)
                                icp->icp_ndevs++;

                        if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
                            0))
                                icp->icp_cdr[j].cd_type = icp->icp_info;
                }
        }

        if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
                icp->icp_nchan = binfo.bi_chan_count;
                icp->icp_ndevs += icp->icp_nchan;
        }

        icp_recompute_openings(icp);

        /*
         * Attach SCSI channels.
         */
        if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
                struct icp_ioc_version *iv;
                struct icp_rawioc *ri;
                struct icp_getch *gc;

                iv = (struct icp_ioc_version *)icp->icp_scr;
                iv->iv_version = htole32(ICP_IOC_NEWEST);
                iv->iv_listents = ICP_MAXBUS;
                iv->iv_firstchan = 0;
                iv->iv_lastchan = ICP_MAXBUS - 1;
                iv->iv_listoffset = htole32(sizeof(*iv));

                if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
                    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
                    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
                        ri = (struct icp_rawioc *)(iv + 1);
                        for (j = 0; j < binfo.bi_chan_count; j++, ri++)
                                icp->icp_bus_id[j] = ri->ri_procid;
                } else {
                        /*
                         * Fall back to the old method.
                         */
                        gc = (struct icp_getch *)icp->icp_scr;

                        for (j = 0; j < binfo.bi_chan_count; j++) {
                                if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
                                    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
                                    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
                                    sizeof(*gc))) {
                                        aprint_error_dev(&icp->icp_dv,
                                            "unable to get chan info");
                                        goto bail_out;
                                }
                                icp->icp_bus_id[j] = gc->gc_scsiid;
                        }
                }

                for (j = 0; j < binfo.bi_chan_count; j++) {
                        if (icp->icp_bus_id[j] > ICP_MAXID_FC)
                                icp->icp_bus_id[j] = ICP_MAXID_FC;

                        icpa.icpa_unit = j + ICPA_UNIT_SCSI;

                        locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

                        icp->icp_children[icpa.icpa_unit] =
                            config_found_sm_loc(&icp->icp_dv, "icp", locs,
                                &icpa, icp_print, config_stdsubmatch);
                }
        }

        /*
         * Attach cache devices.
         */
        if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
                for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
                        if (icp->icp_cdr[j].cd_size == 0)
                                continue;

                        icpa.icpa_unit = j;

                        locs[ICPCF_UNIT] = j;

                        icp->icp_children[icpa.icpa_unit] =
                            config_found_sm_loc(&icp->icp_dv, "icp", locs,
                                &icpa, icp_print, config_stdsubmatch);
                }
        }

        /*
         * Start the watchdog.
         */
        icp_watchdog(icp);

        /*
         * Count the controller, and we're done!
         */
        if (icp_count++ == 0)
                mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);

        return (0);

 bail_out:
        if (state > 4) {
                for (j = 0; j < i; j++)
                        bus_dmamap_destroy(icp->icp_dmat,
                            icp->icp_ccbs[j].ic_xfer_map);
                free(icp->icp_ccbs, M_DEVBUF);
        }
        if (state > 3)
                bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
        if (state > 2)
                bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
                    ICP_SCRATCH_SIZE);
        if (state > 1)
                bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
        bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

        return (1);
}

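/*
 * icp_register_servicecb:
 *
 *      Register a child device's service callbacks for the given unit.
 */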
void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

        icp->icp_servicecb[unit] = cb;
}

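/*
 * icp_rescan:
 *
 *      Rescan a single cache service host drive, attaching or detaching
 *      the corresponding child device as the drive appears, disappears,
 *      or changes size/type.  The caller must have frozen the command
 *      queue (no commands running) and must hold the ioctl mutex.
 */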
void
icp_rescan(struct icp_softc *icp, int unit)
{
        struct icp_attach_args icpa;
        u_int newsize, newtype;
        int locs[ICPCF_NLOCS];

        /*
         * NOTE: It is very important that the queue be frozen and not
         * commands running when this is called.  The ioctl mutex must
         * also be held.
         */

        KASSERT(icp->icp_qfreeze != 0);
        KASSERT(icp->icp_running == 0);
        KASSERT(unit < ICP_MAX_HDRIVES);

        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
                printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
                    device_xname(&icp->icp_dv), unit, icp->icp_status);
#endif
                goto gone;
        }
        if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
                printf("%s: rescan: unit %d has zero size\n",
                    device_xname(&icp->icp_dv), unit);
#endif
 gone:
                /*
                 * Host drive is no longer present; detach if a child
                 * is currently there.
                 */
                if (icp->icp_cdr[unit].cd_size != 0)
                        icp->icp_ndevs--;
                icp->icp_cdr[unit].cd_size = 0;
                if (icp->icp_children[unit] != NULL) {
                        (void) config_detach(icp->icp_children[unit],
                            DETACH_FORCE);
                        icp->icp_children[unit] = NULL;
                }
                goto out;
        }

        if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
                newtype = icp->icp_info;
        else {
#ifdef ICP_DEBUG
                printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
                    device_xname(&icp->icp_dv), unit);
#endif
                newtype = 0;    /* XXX? */
        }

#ifdef ICP_DEBUG
        printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
            device_xname(&icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
            icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

        /*
         * If the type or size changed, detach any old child (if it exists)
         * and attach a new one.
         */
        if (icp->icp_children[unit] == NULL ||
            newsize != icp->icp_cdr[unit].cd_size ||
            newtype != icp->icp_cdr[unit].cd_type) {
                if (icp->icp_cdr[unit].cd_size == 0)
                        icp->icp_ndevs++;
                icp->icp_cdr[unit].cd_size = newsize;
                icp->icp_cdr[unit].cd_type = newtype;
                if (icp->icp_children[unit] != NULL)
                        (void) config_detach(icp->icp_children[unit],
                            DETACH_FORCE);

                icpa.icpa_unit = unit;

                locs[ICPCF_UNIT] = unit;

                icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv,
                    "icp", locs, &icpa, icp_print, config_stdsubmatch);
        }

 out:
        icp_recompute_openings(icp);
}

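/*
 * icp_rescan_all:
 *
 *      Old-style rescan: re-initialize the cache service to obtain the
 *      current host drive count, rescan each drive in turn, and detach
 *      children for any drives that are no longer present.
 */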
void
icp_rescan_all(struct icp_softc *icp)
{
        u_int16_t cdev_cnt;
        int unit;

        /*
         * This is the old method of rescanning the host drives.  We
         * start by reinitializing the cache service.
         */
        if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
                printf("%s: unable to re-initialize cache service for rescan\n",
                    device_xname(&icp->icp_dv));
                return;
        }
        cdev_cnt = (u_int16_t) icp->icp_info;

        /* For each host drive, do the new-style rescan. */
        for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
                icp_rescan(icp, unit);

        /* Now detach anything in the slots after cdev_cnt. */
        for (; unit < ICP_MAX_HDRIVES; unit++) {
                if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
                        printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
                            device_xname(&icp->icp_dv), unit, cdev_cnt);
#endif
                        icp->icp_ndevs--;
                        icp->icp_cdr[unit].cd_size = 0;
                        if (icp->icp_children[unit] != NULL) {
                                (void) config_detach(icp->icp_children[unit],
                                    DETACH_FORCE);
                                icp->icp_children[unit] = NULL;
                        }
                }
        }

        icp_recompute_openings(icp);
}

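/*
 * icp_recompute_openings:
 *
 *      Redistribute the available CCBs evenly among the attached devices
 *      and notify each child of its new number of openings.
 */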
void
icp_recompute_openings(struct icp_softc *icp)
{
        int unit, openings;

        if (icp->icp_ndevs != 0)
                openings =
                    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
        else
                openings = 0;
        if (openings == icp->icp_openings)
                return;
        icp->icp_openings = openings;

#ifdef ICP_DEBUG
        printf("%s: %d device%s, %d openings per device\n",
            device_xname(&icp->icp_dv), icp->icp_ndevs,
            icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

        for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
                if (icp->icp_children[unit] != NULL)
                        (*icp->icp_servicecb[unit]->iscb_openings)(
                            icp->icp_children[unit], icp->icp_openings);
        }
}

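/*
 * icp_watchdog:
 *
 *      Periodic timer: poll for completed commands and restart the
 *      command queue if there is work pending, then reschedule.
 */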
void
icp_watchdog(void *cookie)
{
        struct icp_softc *icp;
        int s;

        icp = cookie;

        s = splbio();
        icp_intr(icp);
        if (ICP_HAS_WORK(icp))
                icp_ccb_enqueue(icp, NULL);
        splx(s);

        callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
            icp_watchdog, icp);
}

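/*
 * icp_print:
 *
 *      Autoconfiguration glue: print the kind of child device (block
 *      device or SCSI channel) and its unit number.
 */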
int
icp_print(void *aux, const char *pnp)
{
        struct icp_attach_args *icpa;
        const char *str;

        icpa = (struct icp_attach_args *)aux;

        if (pnp != NULL) {
                if (icpa->icpa_unit < ICPA_UNIT_SCSI)
                        str = "block device";
                else
                        str = "SCSI channel";
                aprint_normal("%s at %s", str, pnp);
        }
        aprint_normal(" unit %d", icpa->icpa_unit);

        return (UNCONF);
}

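/*
 * icp_async_event:
 *
 *      Record an asynchronous event reported by the controller in the
 *      event buffer.  Newer firmware (>= 0x1a) supplies a pre-formatted
 *      event string; older firmware supplies raw service/status/info
 *      values.
 */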
int
icp_async_event(struct icp_softc *icp, int service)
{

        if (service == ICP_SCREENSERVICE) {
                if (icp->icp_status == ICP_S_MSG_REQUEST) {
                        /* XXX */
                }
        } else {
                if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
                        icp->icp_evt.size = 0;
                        icp->icp_evt.eu.async.ionode =
                            device_unit(&icp->icp_dv);
                        icp->icp_evt.eu.async.status = icp->icp_status;
                        /*
                         * Severity and event string are filled in by the
                         * hardware interface interrupt handler.
                         */
                        printf("%s: %s\n", device_xname(&icp->icp_dv),
                            icp->icp_evt.event_string);
                } else {
                        icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
                        icp->icp_evt.eu.async.ionode =
                            device_unit(&icp->icp_dv);
                        icp->icp_evt.eu.async.service = service;
                        icp->icp_evt.eu.async.status = icp->icp_status;
                        icp->icp_evt.eu.async.info = icp->icp_info;
                        /* XXXJRT FIX THIS */
                        *(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
                            icp->icp_info2;
                }
                icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
        }

        return (0);
}

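/*
 * icp_intr:
 *
 *      Interrupt handler: fetch the controller status, dispatch async
 *      and "special" indexes, and otherwise complete the CCB identified
 *      by the returned command index, waking any waiter or invoking the
 *      CCB's completion callback.
 */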
int
icp_intr(void *cookie)
{
        struct icp_softc *icp;
        struct icp_intr_ctx ctx;
        struct icp_ccb *ic;

        icp = cookie;

        ctx.istatus = (*icp->icp_get_status)(icp);
        if (!ctx.istatus) {
                icp->icp_status = ICP_S_NO_STATUS;
                return (0);
        }

        (*icp->icp_intr)(icp, &ctx);

        icp->icp_status = ctx.cmd_status;
        icp->icp_service = ctx.service;
        icp->icp_info = ctx.info;
        icp->icp_info2 = ctx.info2;

        switch (ctx.istatus) {
        case ICP_ASYNCINDEX:
                icp_async_event(icp, ctx.service);
                return (1);

        case ICP_SPEZINDEX:
                aprint_error_dev(&icp->icp_dv,
                    "uninitialized or unknown service (%d/%d)\n",
                    ctx.info, ctx.info2);
                icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
                icp->icp_evt.eu.driver.ionode = device_unit(&icp->icp_dv);
                icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
                return (1);
        }

        if ((ctx.istatus - 2) > icp->icp_nccbs)
                panic("icp_intr: bad command index returned");

        ic = &icp->icp_ccbs[ctx.istatus - 2];
        ic->ic_status = icp->icp_status;

        if ((ic->ic_flags & IC_ALLOCED) == 0) {
                /* XXX ICP's "iir" driver just sends an event here. */
                panic("icp_intr: inactive CCB identified");
        }

        /*
         * Try to protect ourselves from the running command count already
         * being 0 (e.g. if a polled command times out).
         */
        KDASSERT(icp->icp_running != 0);
        if (--icp->icp_running == 0 &&
            (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
                icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
                wakeup(&icp->icp_qfreeze);
        }

        switch (icp->icp_status) {
        case ICP_S_BSY:
#ifdef ICP_DEBUG
                printf("%s: ICP_S_BSY received\n",
                    device_xname(&icp->icp_dv));
#endif
                if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
                        SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic,
                            ic_chain);
                else
                        SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic,
                            ic_chain);
                break;

        default:
                ic->ic_flags |= IC_COMPLETE;

                if ((ic->ic_flags & IC_WAITING) != 0)
                        wakeup(ic);
                else if (ic->ic_intr != NULL)
                        (*ic->ic_intr)(ic);

                if (ICP_HAS_WORK(icp))
                        icp_ccb_enqueue(icp, NULL);

                break;
        }

        return (1);
}

struct icp_ucmd_ctx {
        gdt_ucmd_t *iu_ucmd;
        u_int32_t iu_cnt;
};

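/*
 * icp_ucmd_intr:
 *
 *      Completion handler for user (ioctl-initiated) commands: copy
 *      status and any returned data back to the user's gdt_ucmd_t and
 *      release the shared scratch area.
 */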
void
icp_ucmd_intr(struct icp_ccb *ic)
{
        struct icp_softc *icp = (void *) ic->ic_dv;
        struct icp_ucmd_ctx *iu = ic->ic_context;
        gdt_ucmd_t *ucmd = iu->iu_ucmd;

        ucmd->status = icp->icp_status;
        ucmd->info = icp->icp_info;

        if (iu->iu_cnt != 0) {
                bus_dmamap_sync(icp->icp_dmat,
                    icp->icp_scr_dmamap,
                    ICP_SCRATCH_UCMD, iu->iu_cnt,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                memcpy(ucmd->data,
                    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
        }

        icp->icp_ucmd_ccb = NULL;

        ic->ic_flags |= IC_COMPLETE;
        wakeup(ic);
}

/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
        u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
        struct icp_ioctlcmd *icmd;
        struct icp_cachecmd *cc;
        struct icp_rawcmd *rc;
        struct icp_ccb *ic;
        int retries, rv;

        retries = ICP_RETRIES;

        do {
                ic = icp_ccb_alloc_wait(icp);
                memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
                ic->ic_cmd.cmd_opcode = htole16(opcode);

                switch (service) {
                case ICP_CACHESERVICE:
                        if (opcode == ICP_IOCTL) {
                                icmd = &ic->ic_cmd.cmd_packet.ic;
                                icmd->ic_subfunc = htole16(arg1);
                                icmd->ic_channel = htole32(arg2);
                                icmd->ic_bufsize = htole32(arg3);
                                icmd->ic_addr =
                                    htole32(icp->icp_scr_seg[0].ds_addr);

                                bus_dmamap_sync(icp->icp_dmat,
                                    icp->icp_scr_dmamap, 0, arg3,
                                    BUS_DMASYNC_PREWRITE |
                                    BUS_DMASYNC_PREREAD);
                        } else {
                                cc = &ic->ic_cmd.cmd_packet.cc;
                                cc->cc_deviceno = htole16(arg1);
                                cc->cc_blockno = htole32(arg2);
                        }
                        break;

                case ICP_SCSIRAWSERVICE:
                        rc = &ic->ic_cmd.cmd_packet.rc;
                        rc->rc_direction = htole32(arg1);
                        rc->rc_bus = arg2;
                        rc->rc_target = arg3;
                        rc->rc_lun = arg3 >> 8;
                        break;
                }

                ic->ic_service = service;
                ic->ic_cmdlen = sizeof(ic->ic_cmd);
                rv = icp_ccb_poll(icp, ic, 10000);

                switch (service) {
                case ICP_CACHESERVICE:
                        if (opcode == ICP_IOCTL) {
                                bus_dmamap_sync(icp->icp_dmat,
                                    icp->icp_scr_dmamap, 0, arg3,
                                    BUS_DMASYNC_POSTWRITE |
                                    BUS_DMASYNC_POSTREAD);
                        }
                        break;
                }

                icp_ccb_free(icp, ic);
        } while (rv != 0 && --retries > 0);

        return (icp->icp_status == ICP_S_OK);
}

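/*
 * icp_ucmd:
 *
 *      Execute a user (management tool) command: validate that the data
 *      fits in the shared scratch area, translate the user's command
 *      packet into a CCB, and wait for it to complete.
 */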
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
        struct icp_ccb *ic;
        struct icp_ucmd_ctx iu;
        u_int32_t cnt;
        int error;

        if (ucmd->service == ICP_CACHESERVICE) {
                if (ucmd->command.cmd_opcode == ICP_IOCTL) {
                        cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
                        if (cnt > GDT_SCRATCH_SZ) {
                                aprint_error_dev(&icp->icp_dv,
                                    "scratch buffer too small (%d/%d)\n",
                                    GDT_SCRATCH_SZ, cnt);
                                return (EINVAL);
                        }
                } else {
                        cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
                            ICP_SECTOR_SIZE;
                        if (cnt > GDT_SCRATCH_SZ) {
                                aprint_error_dev(&icp->icp_dv,
                                    "scratch buffer too small (%d/%d)\n",
                                    GDT_SCRATCH_SZ, cnt);
                                return (EINVAL);
                        }
                }
        } else {
                cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
                    ucmd->command.cmd_packet.rc.rc_sense_len;
                if (cnt > GDT_SCRATCH_SZ) {
                        aprint_error_dev(&icp->icp_dv,
                            "scratch buffer too small (%d/%d)\n",
                            GDT_SCRATCH_SZ, cnt);
                        return (EINVAL);
                }
        }

        iu.iu_ucmd = ucmd;
        iu.iu_cnt = cnt;

        ic = icp_ccb_alloc_wait(icp);
        memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
        ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

        if (ucmd->service == ICP_CACHESERVICE) {
                if (ucmd->command.cmd_opcode == ICP_IOCTL) {
                        struct icp_ioctlcmd *icmd, *uicmd;

                        icmd = &ic->ic_cmd.cmd_packet.ic;
                        uicmd = &ucmd->command.cmd_packet.ic;

                        icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
                        icmd->ic_channel = htole32(uicmd->ic_channel);
                        icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
                        icmd->ic_addr =
                            htole32(icp->icp_scr_seg[0].ds_addr +
                            ICP_SCRATCH_UCMD);
                } else {
                        struct icp_cachecmd *cc, *ucc;

                        cc = &ic->ic_cmd.cmd_packet.cc;
                        ucc = &ucmd->command.cmd_packet.cc;

                        cc->cc_deviceno = htole16(ucc->cc_deviceno);
                        cc->cc_blockno = htole32(ucc->cc_blockno);
                        cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
                        cc->cc_addr = htole32(0xffffffffU);
                        cc->cc_nsgent = htole32(1);
                        cc->cc_sg[0].sg_addr =
                            htole32(icp->icp_scr_seg[0].ds_addr +
                            ICP_SCRATCH_UCMD);
                        cc->cc_sg[0].sg_len = htole32(cnt);
                }
        } else {
                struct icp_rawcmd *rc, *urc;

                rc = &ic->ic_cmd.cmd_packet.rc;
                urc = &ucmd->command.cmd_packet.rc;

                rc->rc_direction = htole32(urc->rc_direction);
                rc->rc_sdata = htole32(0xffffffffU);
                rc->rc_sdlen = htole32(urc->rc_sdlen);
                rc->rc_clen = htole32(urc->rc_clen);
                memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
                rc->rc_target = urc->rc_target;
                rc->rc_lun = urc->rc_lun;
                rc->rc_bus = urc->rc_bus;
                rc->rc_sense_len = htole32(urc->rc_sense_len);
                rc->rc_sense_addr =
                    htole32(icp->icp_scr_seg[0].ds_addr +
                    ICP_SCRATCH_UCMD + urc->rc_sdlen);
                rc->rc_nsgent = htole32(1);
                rc->rc_sg[0].sg_addr =
                    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
                rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
        }

        ic->ic_service = ucmd->service;
        ic->ic_cmdlen = sizeof(ic->ic_cmd);
        ic->ic_context = &iu;

        /*
         * XXX What units are ucmd->timeout in?  Until we know, we
         * XXX just pull a number out of thin air.
         */
        if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
                aprint_error_dev(&icp->icp_dv,
                    "error %d waiting for ucmd to complete\n", error);

        /* icp_ucmd_intr() has updated ucmd. */
        icp_ccb_free(icp, ic);

        return (error);
}

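/*
 * icp_ccb_alloc:
 *
 *      Allocate a CCB from the free list, or return NULL if none are
 *      available.  icp_ccb_alloc_wait() is the sleeping variant.
 */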
struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
        struct icp_ccb *ic;
        int s;

        s = splbio();
        if (__predict_false((ic =
            SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
                splx(s);
                return (NULL);
        }
        SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
        splx(s);

        ic->ic_flags = IC_ALLOCED;
        return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
        struct icp_ccb *ic;
        int s;

        s = splbio();
        while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
                icp->icp_flags |= ICP_F_WAIT_CCB;
                (void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
        }
        SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
        splx(s);

        ic->ic_flags = IC_ALLOCED;
        return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
        int s;

        s = splbio();
        ic->ic_flags = 0;
        ic->ic_intr = NULL;
        SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
        if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
                icp->icp_flags &= ~ICP_F_WAIT_CCB;
                wakeup(&icp->icp_ccb_freelist);
        }
        splx(s);
}

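/*
 * icp_ccb_enqueue:
 *
 *      Queue a CCB (if one is given) and start as many queued commands
 *      as the controller will accept.  User commands take priority, but
 *      only one may use the scratch area at a time.
 */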
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
        int s;

        s = splbio();

        if (ic != NULL) {
                if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
                        SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic,
                            ic_chain);
                else
                        SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic,
                            ic_chain);
        }

        for (; icp->icp_qfreeze == 0;) {
                if (__predict_false((ic =
                    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
                        struct icp_ucmd_ctx *iu = ic->ic_context;
                        gdt_ucmd_t *ucmd = iu->iu_ucmd;

                        /*
                         * All user-generated commands share the same
                         * scratch space, so if one is already running,
                         * we have to stall the command queue.
                         */
                        if (icp->icp_ucmd_ccb != NULL)
                                break;
                        if ((*icp->icp_test_busy)(icp))
                                break;
                        icp->icp_ucmd_ccb = ic;

                        if (iu->iu_cnt != 0) {
                                memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
                                    ucmd->data, iu->iu_cnt);
                                bus_dmamap_sync(icp->icp_dmat,
                                    icp->icp_scr_dmamap,
                                    ICP_SCRATCH_UCMD, iu->iu_cnt,
                                    BUS_DMASYNC_PREREAD |
                                    BUS_DMASYNC_PREWRITE);
                        }
                } else if (__predict_true((ic =
                    SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
                        if ((*icp->icp_test_busy)(icp))
                                break;
                } else {
                        /* no command found */
                        break;
                }
                icp_ccb_submit(icp, ic);
                if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
                        SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
                else
                        SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
        }

        splx(s);
}

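/*
 * icp_ccb_map:
 *
 *      Load the DMA map for a data transfer and fill in the CCB's
 *      scatter/gather list (if any), syncing the buffer for the
 *      direction of the transfer.
 */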
int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
            int dir)
{
        struct icp_sg *sg;
        int nsegs, i, rv;
        bus_dmamap_t xfer;

        xfer = ic->ic_xfer_map;

        rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
            BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
            ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
        if (rv != 0)
                return (rv);

        nsegs = xfer->dm_nsegs;
        ic->ic_xfer_size = size;
        ic->ic_nsgent = nsegs;
        ic->ic_flags |= dir;
        sg = ic->ic_sg;

        if (sg != NULL) {
                for (i = 0; i < nsegs; i++, sg++) {
                        sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
                        sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
                }
        } else if (nsegs > 1)
                panic("icp_ccb_map: no SG list specified, but nsegs > 1");

        if ((dir & IC_XFER_OUT) != 0)
                i = BUS_DMASYNC_PREWRITE;
        else /* if ((dir & IC_XFER_IN) != 0) */
                i = BUS_DMASYNC_PREREAD;

        bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
        return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
        int i;

        if ((ic->ic_flags & IC_XFER_OUT) != 0)
                i = BUS_DMASYNC_POSTWRITE;
        else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
                i = BUS_DMASYNC_POSTREAD;

        bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size,
            i);
        bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

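/*
 * icp_ccb_poll:
 *
 *      Submit a CCB directly and wait for it to complete, busy-waiting
 *      (and calling icp_intr() by hand) when the system is cold,
 *      sleeping otherwise.  The timeout is in milliseconds.
 */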
int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
        int s, rv;

        s = splbio();

        for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
                if (!(*icp->icp_test_busy)(icp))
                        break;
                DELAY(10);
        }
        if (timo == 0) {
                printf("%s: submit: busy\n", device_xname(&icp->icp_dv));
                splx(s);
                return (EAGAIN);
        }

        icp_ccb_submit(icp, ic);

        if (cold) {
                for (timo *= 10; timo != 0; timo--) {
                        DELAY(100);
                        icp_intr(icp);
                        if ((ic->ic_flags & IC_COMPLETE) != 0)
                                break;
                }
        } else {
                ic->ic_flags |= IC_WAITING;
                while ((ic->ic_flags & IC_COMPLETE) == 0) {
                        if ((rv = tsleep(ic, PRIBIO, "icpwccb",
                            mstohz(timo))) != 0) {
                                timo = 0;
                                break;
                        }
                }
        }

        if (timo != 0) {
                if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
                        printf("%s: request failed; status=0x%04x\n",
                            device_xname(&icp->icp_dv), ic->ic_status);
#endif
                        rv = EIO;
                } else
                        rv = 0;
        } else {
                aprint_error_dev(&icp->icp_dv, "command timed out\n");
                rv = EIO;
        }

        while ((*icp->icp_test_busy)(icp) != 0)
                DELAY(10);

        splx(s);

        return (rv);
}

int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
        int s, rv;

        ic->ic_flags |= IC_WAITING;

        s = splbio();
        icp_ccb_enqueue(icp, ic);
        while ((ic->ic_flags & IC_COMPLETE) == 0) {
                if ((rv = tsleep(ic, PRIBIO, "icpwccb",
                    mstohz(timo))) != 0) {
                        splx(s);
                        return (rv);
                }
        }
        splx(s);

        if (ic->ic_status != ICP_S_OK) {
                aprint_error_dev(&icp->icp_dv, "command failed; status=%x\n",
                    ic->ic_status);
                return (EIO);
        }

        return (0);
}

int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
        int s, rv;

        ic->ic_dv = &icp->icp_dv;
        ic->ic_intr = icp_ucmd_intr;
        ic->ic_flags |= IC_UCMD;

        s = splbio();
        icp_ccb_enqueue(icp, ic);
        while ((ic->ic_flags & IC_COMPLETE) == 0) {
                if ((rv = tsleep(ic, PRIBIO, "icpwuccb",
                    mstohz(timo))) != 0) {
                        splx(s);
                        return (rv);
                }
        }
        splx(s);

        return (0);
}

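/*
 * icp_ccb_submit:
 *
 *      Hand a single CCB to the controller via the hardware interface's
 *      copy/release hooks, bumping the running command count.
 */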
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

        ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

        (*icp->icp_set_sema0)(icp);
        DELAY(10);

        ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
        ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

        icp->icp_running++;

        (*icp->icp_copy_cmd)(icp, ic);
        (*icp->icp_release_event)(icp, ic);
}

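/*
 * icp_freeze:
 *
 *      Freeze the command queue and wait for all running commands to
 *      drain, e.g. before a rescan.  icp_unfreeze() restarts the queue.
 */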
int
icp_freeze(struct icp_softc *icp)
{
        int s, error = 0;

        s = splbio();
        if (icp->icp_qfreeze++ == 0) {
                while (icp->icp_running != 0) {
                        icp->icp_flags |= ICP_F_WAIT_FREEZE;
                        error = tsleep(&icp->icp_qfreeze, PRIBIO | PCATCH,
                            "icpqfrz", 0);
                        if (error != 0 && --icp->icp_qfreeze == 0 &&
                            ICP_HAS_WORK(icp)) {
                                icp_ccb_enqueue(icp, NULL);
                                break;
                        }
                }
        }
        splx(s);

        return (error);
}

void
icp_unfreeze(struct icp_softc *icp)
{
        int s;

        s = splbio();
        KDASSERT(icp->icp_qfreeze != 0);
        if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
                icp_ccb_enqueue(icp, NULL);
        splx(s);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

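/*
 * icp_store_event:
 *
 *      Record an event in the global event buffer, coalescing duplicates
 *      of the most recent entry and recycling the oldest slot when the
 *      ring is full.
 */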
gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
        gdt_evt_str *e;

        /* no source == no event */
        if (source == 0)
                return (NULL);

        e = &icp_event_buffer[icp_event_lastidx];
        if (e->event_source == source && e->event_idx == idx &&
            ((evt->size != 0 && e->event_data.size != 0 &&
              memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
             (evt->size == 0 && e->event_data.size == 0 &&
              strcmp((char *) e->event_data.event_string,
              (char *) evt->event_string) == 0))) {
                e->last_stamp = time_second;
                e->same_count++;
        } else {
                if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
                        icp_event_lastidx++;
                        if (icp_event_lastidx == ICP_MAX_EVENTS)
                                icp_event_lastidx = 0;
                        if (icp_event_lastidx == icp_event_oldidx) {
                                icp_event_oldidx++;
                                if (icp_event_oldidx == ICP_MAX_EVENTS)
                                        icp_event_oldidx = 0;
                        }
                }
                e = &icp_event_buffer[icp_event_lastidx];
                e->event_source = source;
                e->event_idx = idx;
                e->first_stamp = e->last_stamp = time_second;
                e->same_count = 1;
                e->event_data = *evt;
                e->application = 0;
        }

        return (e);
}

int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
        gdt_evt_str *e;
        int eindex, s;

        s = splbio();

        if (handle == -1)
                eindex = icp_event_oldidx;
        else
                eindex = handle;

        estr->event_source = 0;

        if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
                splx(s);
                return (eindex);
        }

        e = &icp_event_buffer[eindex];
        if (e->event_source != 0) {
                if (eindex != icp_event_lastidx) {
                        eindex++;
                        if (eindex == ICP_MAX_EVENTS)
                                eindex = 0;
                } else
                        eindex = -1;
                memcpy(estr, e, sizeof(gdt_evt_str));
        }

        splx(s);

        return (eindex);
}

void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
        gdt_evt_str *e;
        int found = 0, eindex, s;

        s = splbio();

        eindex = icp_event_oldidx;
        for (;;) {
                e = &icp_event_buffer[eindex];
                if (e->event_source == 0)
                        break;
                if ((e->application & application) == 0) {
                        e->application |= application;
                        found = 1;
                        break;
                }
                if (eindex == icp_event_lastidx)
                        break;
                eindex++;
                if (eindex == ICP_MAX_EVENTS)
                        eindex = 0;
        }
        if (found)
                memcpy(estr, e, sizeof(gdt_evt_str));
        else
                estr->event_source = 0;

        splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
        int s;

        s = splbio();
        icp_event_oldidx = icp_event_lastidx = 0;
        memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
        splx(s);
}