/*	$NetBSD: iop.c,v 1.76 2009/05/12 14:23:47 cegger Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.76 2009/05/12 14:23:47 cegger Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define	POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);
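
/*
 * Note: POLL() busy-waits, re-testing "cond" every 100us, so POLL(ms, cond)
 * performs at most (ms * 10) probes before giving up.
 */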

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
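
/*
 * A transaction context (TCTX) carries the wrapper's index in sc->sc_ims in
 * its low IOP_TCTX_SHIFT bits and a generation number in the remaining bits;
 * see iop_msg_alloc() and iop_handle_reply().
 */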

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(device_t);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(device_t, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
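
/*
 * These accessors pair each register access with a bus_space barrier so
 * that FIFO reads and writes are not reordered on weakly-ordered busses;
 * e.g. the write in iop_outl() is flushed before any subsequent access.
 */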

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(&sc->sc_dv),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
	printf("%s: mem %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "couldn't create dmamap (%d)", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((device_t)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(&sc->sc_dv), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(&sc->sc_dv));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(&sc->sc_dv));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(&sc->sc_dv, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(&sc->sc_dv), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(&sc->sc_dv),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(&sc->sc_dv, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(&sc->sc_dv)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(&sc->sc_dv, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	mb[0] += 2 << 16;
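
	/*
	 * The high 16 bits of mb[0] hold the message size in 32-bit words;
	 * the "+= 2 << 16" above accounts for the two-word SG element just
	 * appended.
	 */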

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(&sc->sc_dv, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n",
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(&sc->sc_dv), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "TABLE_CLEAR failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(&sc->sc_dv, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}
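
/*
 * Note: replies from the IOP carry the initiator context (msgictx); the
 * IOP_ICTXHASH table populated above is what iop_handle_reply() uses to map
 * that value back to the registered initiator.
 */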

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif

	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(&sc->sc_dv, "WARNING: bad ictx returned (%x)\n",
			    ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(&sc->sc_dv, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
			if (im != NULL)
				aprint_error_dev(&sc->sc_dv, "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(&sc->sc_dv));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
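
	/*
	 * The generation number in the upper bits lets iop_handle_reply()
	 * reject stale or corrupt transaction contexts: a reply must match
	 * both the wrapper index and the generation it was issued with.
	 */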
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
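
	/*
	 * Each SIMPLE element occupies two 32-bit words (flag/length word
	 * plus physical address), hence the divide-by-two of the remaining
	 * frame space above.
	 */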

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
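
	/*
	 * Header bookkeeping: "(mb[0] >> 12) & 0xf0" copies the low nibble
	 * of the current word count (mb[0] >> 16) into bits 4-7, the SGL
	 * offset nibble of the header's version/offset byte; "nsegs << 17"
	 * then grows the word count (bits 16-31) by two words per SIMPLE
	 * element.
	 */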
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
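
	/*
	 * As in iop_msg_map(): off << 4 seeds the SGL offset nibble with the
	 * pre-SGL word count, and nsegs << 16 adds the number of SGL words
	 * just written (nsegs was doubled above in the SIMPLE case, and
	 * already counts words in the PAGE_LIST case).
	 */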
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
2091 * Post a message frame to the IOP's inbound queue.
2094 iop_post(struct iop_softc *sc, u_int32_t *mb)
2096 u_int32_t mfa;
2098 #ifdef I2ODEBUG
2099 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2100 panic("iop_post: frame too large");
2101 #endif
2103 mutex_spin_enter(&sc->sc_intrlock);
2105 /* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			mutex_spin_exit(&sc->sc_intrlock);
			aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_rep_size != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
	}

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
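
	/*
	 * Barrier the whole frame: (mb[0] >> 14) & ~3 is the frame
	 * length in bytes, i.e. the word count (mb[0] >> 16) shifted
	 * left by two.
	 */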
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	mutex_spin_exit(&sc->sc_intrlock);
	return (0);
}

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv;

	mb = xmb;

	/*
	 * Terminate the scatter/gather list chain: the flags word of
	 * the last SIMPLE element sits two words before the current
	 * end of the frame.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		mutex_spin_enter(&sc->sc_intrlock);
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		mutex_spin_exit(&sc->sc_intrlock);
	} else
		rv = 0;
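
	/*
	 * Summary of rv: 0 on success (or when no status was asked
	 * for), EBUSY if no reply arrived in time, ENXIO on transport
	 * failure, EIO on an unsuccessful request status.
	 */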
	return (rv);
}

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;

	mutex_spin_enter(&sc->sc_intrlock);
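
	/* timo is in milliseconds; poll in 100 microsecond steps. */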
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		mutex_spin_exit(&sc->sc_intrlock);
		DELAY(100);
		mutex_spin_enter(&sc->sc_intrlock);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int rv;

	mutex_spin_enter(&sc->sc_intrlock);
	if ((im->im_flags & IM_REPLIED) != 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return;
	}
	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
	mutex_spin_exit(&sc->sc_intrlock);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/*
	 * Use the frame to issue a no-op: the first word carries the
	 * message version and a frame length of four words (4 << 16),
	 * followed by the UTIL_NOP function code and zeroed context
	 * fields.
	 */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
	const char *statusstr;

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", device_xname(&sc->sc_dv));

	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
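
	/*
	 * Initiator and target TIDs are 12-bit fields packed into
	 * msgfunc below the function code in the top byte.
	 */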
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));

	printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, ...
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
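
	/*
	 * hc is set once a printable character has been seen; lc
	 * tracks the index just past the last printable character, so
	 * the final NUL trims any trailing spaces.
	 */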
	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __packed p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}

int
iopopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct iop_softc *sc;

	sc = device_lookup_private(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	sc = device_lookup_private(&iop_cd, minor(dev));
	rv = 0;
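
	/*
	 * IOPIOCGSTATUS, IOPIOCGLCT and IOPIOCGTIDMAP pass an iovec
	 * describing the user buffer; the copy is clamped to the
	 * smaller of the object size and iov_len, and iov_len is
	 * updated to reflect the amount actually copied.
	 */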
	switch (cmd) {
	case IOPIOCPT:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
#endif
		return (ENOTTY);
	}

	mutex_enter(&sc->sc_conflock);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	mutex_exit(&sc->sc_conflock);
	return (rv);
}

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 ||
#if 0
	    pt->pt_replylen < 0 ||
#endif
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;
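
	/*
	 * The frame buffer doubles as the reply buffer: im_rb points
	 * at mf, and the reply is copied out from it below.
	 */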
	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;
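
	/*
	 * Copy out the reply.  Bits 16-31 of msgflags hold the reply
	 * length in words, so (msgflags >> 14) & ~3 yields the length
	 * in bytes; clamp it to the frame size and the caller's buffer.
	 */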
	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}