/*	$NetBSD: iopsp.c,v 1.34 2009/05/12 12:14:18 cegger Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Raw SCSI device support for I2O.  IOPs present SCSI devices individually;
 * we group them by controlling port.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopsp.c,v 1.34 2009/05/12 12:14:18 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/scsiio.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
#include <dev/i2o/iopspvar.h>

static void	iopsp_adjqparam(device_t, int);
static void	iopsp_attach(device_t, device_t, void *);
static void	iopsp_intr(device_t, struct iop_msg *, void *);
static int	iopsp_ioctl(struct scsipi_channel *, u_long,
			    void *, int, struct proc *);
static int	iopsp_match(device_t, cfdata_t, void *);
static int	iopsp_rescan(struct iopsp_softc *);
static int	iopsp_reconfig(device_t);
static void	iopsp_scsipi_request(struct scsipi_channel *,
				     scsipi_adapter_req_t, void *);

CFATTACH_DECL(iopsp, sizeof(struct iopsp_softc),
    iopsp_match, iopsp_attach, NULL, NULL);

/*
 * Match a supported device.
 */
static int
iopsp_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_hba_ctlr_info ci;
	} __packed param;

	ia = aux;

	if (ia->ia_class != I2O_CLASS_BUS_ADAPTER_PORT)
		return (0);

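	/*
	 * Read the HBA controller-info parameter group; we attach only to
	 * parallel SCSI and Fibre Channel bus adapter ports.
	 */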
	if (iop_field_get_all((struct iop_softc *)parent, ia->ia_tid,
	    I2O_PARAM_HBA_CTLR_INFO, &param, sizeof(param), NULL) != 0)
		return (0);

	return (param.ci.bustype == I2O_HBA_BUS_SCSI ||
	    param.ci.bustype == I2O_HBA_BUS_FCA);
}

/*
 * Attach a supported device.
 */
static void
iopsp_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia;
	struct iopsp_softc *sc;
	struct iop_softc *iop;
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		union {
			struct i2o_param_hba_ctlr_info ci;
			struct i2o_param_hba_scsi_ctlr_info sci;
			struct i2o_param_hba_scsi_port_info spi;
		} p;
	} __packed param;
	int fc, rv;
	int size;

	ia = (struct iop_attach_args *)aux;
	sc = device_private(self);
	iop = device_private(parent);

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = iopsp_intr;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	sc->sc_ii.ii_reconfig = iopsp_reconfig;
	sc->sc_ii.ii_adjqparam = iopsp_adjqparam;
	iop_initiator_register(iop, &sc->sc_ii);

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_CTLR_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	fc = (param.p.ci.bustype == I2O_HBA_BUS_FCA);

	/*
	 * Say what the device is.  If we can find out what the controlling
	 * device is, say what that is too.
	 */
	aprint_normal(": SCSI port");
	iop_print_ident(iop, ia->ia_tid);
	aprint_normal("\n");

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_SCSI_CTLR_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	aprint_normal_dev(&sc->sc_dv, "");
	if (fc)
		aprint_normal("FC");
	else
		aprint_normal("%d-bit", param.p.sci.maxdatawidth);
	aprint_normal(", max sync rate %dMHz, initiator ID %d\n",
	    (u_int32_t)le64toh(param.p.sci.maxsyncrate) / 1000,
	    le32toh(param.p.sci.initiatorid));

	sc->sc_openings = 1;
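
	/*
	 * Describe ourselves to scsipi: a single channel whose target count
	 * depends on whether the port is Fibre Channel or parallel SCSI.
	 */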
	sc->sc_adapter.adapt_dev = &sc->sc_dv;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 1;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = iopsp_ioctl;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = iopsp_scsipi_request;

	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = fc ?
	    IOPSP_MAX_FC_TARGET : param.p.sci.maxdatawidth;
	sc->sc_channel.chan_nluns = IOPSP_MAX_LUN;
	sc->sc_channel.chan_id = le32toh(param.p.sci.initiatorid);
	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

	/*
	 * Allocate the target map.  Currently used for informational
	 * purposes only.
	 */
	size = sc->sc_channel.chan_ntargets * sizeof(struct iopsp_target);
	sc->sc_targetmap = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Build the two maps, and attach to scsipi. */
	if (iopsp_reconfig(self) != 0) {
		aprint_error_dev(&sc->sc_dv, "configure failed\n");
		goto bad;
	}
	config_found(self, &sc->sc_channel, scsiprint);
	return;

 bad:
	iop_initiator_unregister(iop, &sc->sc_ii);
}

/*
 * Scan the LCT to determine which devices we control, and enter them into
 * the maps.
 */
static int
iopsp_reconfig(device_t dv)
{
	struct iopsp_softc *sc;
	struct iop_softc *iop;
	struct i2o_lct_entry *le;
	struct scsipi_channel *sc_chan;
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_scsi_device_info sdi;
	} __packed param;
	u_int tid, nent, i, targ, lun, size, rv, bptid;
	u_short *tidmap;
	void *tofree;
	struct iopsp_target *it;
	int syncrate;

	sc = (struct iopsp_softc *)dv;
	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
	sc_chan = &sc->sc_channel;

	KASSERT(mutex_owned(&iop->sc_conflock));

	/* Anything to do? */
	if (iop->sc_chgind == sc->sc_chgind)
		return (0);

	/*
	 * Allocate memory for the target/LUN -> TID map.  Use zero to
	 * denote absent targets (zero is the TID of the I2O executive,
	 * and we never address that here).
	 */
	size = sc_chan->chan_ntargets * (IOPSP_MAX_LUN) * sizeof(u_short);
	if ((tidmap = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
		return (ENOMEM);
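
	/*
	 * Note: IOPSP_TIDMAP() (from iopspvar.h) presumably indexes this
	 * flat array by target and LUN; entries left zeroed denote absent
	 * targets, per the comment above.
	 */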

	for (i = 0; i < sc_chan->chan_ntargets; i++)
		sc->sc_targetmap[i].it_flags &= ~IT_PRESENT;

	/*
	 * A quick hack to handle Intel's stacked bus port arrangement.
	 */
	bptid = sc->sc_ii.ii_tid;
	nent = iop->sc_nlctent;
	for (le = iop->sc_lct->entry; nent != 0; nent--, le++)
		if ((le16toh(le->classid) & 4095) ==
		    I2O_CLASS_BUS_ADAPTER_PORT &&
		    (le32toh(le->usertid) & 4095) == bptid) {
			bptid = le16toh(le->localtid) & 4095;
			break;
		}
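
	/*
	 * Walk the LCT, find the SCSI peripherals that sit behind our bus
	 * port, and record each device's TID in the target/LUN map.
	 */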
	nent = iop->sc_nlctent;
	for (i = 0, le = iop->sc_lct->entry; i < nent; i++, le++) {
		if ((le16toh(le->classid) & 4095) != I2O_CLASS_SCSI_PERIPHERAL)
			continue;
		if (((le32toh(le->usertid) >> 12) & 4095) != bptid)
			continue;
		tid = le16toh(le->localtid) & 4095;

		rv = iop_field_get_all(iop, tid, I2O_PARAM_SCSI_DEVICE_INFO,
		    &param, sizeof(param), NULL);
		if (rv != 0)
			continue;
		targ = le32toh(param.sdi.identifier);
		lun = param.sdi.luninfo[1];
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		if (targ >= sc_chan->chan_ntargets ||
		    lun >= sc_chan->chan_nluns) {
			aprint_error_dev(&sc->sc_dv, "target %d,%d (tid %d): "
			    "bad target/LUN\n", targ, lun, tid);
			continue;
		}
#endif

		/*
		 * If we've already described this target, and nothing has
		 * changed, then don't describe it again.
		 */
		it = &sc->sc_targetmap[targ];
		it->it_flags |= IT_PRESENT;
		syncrate = ((int)le64toh(param.sdi.negsyncrate) + 500) / 1000;
		if (it->it_width != param.sdi.negdatawidth ||
		    it->it_offset != param.sdi.negoffset ||
		    it->it_syncrate != syncrate) {
			it->it_width = param.sdi.negdatawidth;
			it->it_offset = param.sdi.negoffset;
			it->it_syncrate = syncrate;

			aprint_verbose_dev(&sc->sc_dv, "target %d (tid %d): %d-bit, ",
			    targ, tid, it->it_width);
			if (it->it_syncrate == 0)
				aprint_verbose("asynchronous\n");
			else
				aprint_verbose("synchronous at %dMHz, "
				    "offset 0x%x\n", it->it_syncrate,
				    it->it_offset);
		}

		/* Ignore the device if it's in use by somebody else. */
		if ((le32toh(le->usertid) & 4095) != I2O_TID_NONE) {
			if (sc->sc_tidmap == NULL ||
			    IOPSP_TIDMAP(sc->sc_tidmap, targ, lun) !=
			    IOPSP_TID_INUSE) {
				aprint_verbose_dev(&sc->sc_dv, "target %d,%d (tid %d): "
				    "in use by tid %d\n",
				    targ, lun, tid,
				    le32toh(le->usertid) & 4095);
			}
			IOPSP_TIDMAP(tidmap, targ, lun) = IOPSP_TID_INUSE;
		} else
			IOPSP_TIDMAP(tidmap, targ, lun) = (u_short)tid;
	}

	for (i = 0; i < sc_chan->chan_ntargets; i++)
		if ((sc->sc_targetmap[i].it_flags & IT_PRESENT) == 0)
			sc->sc_targetmap[i].it_width = 0;

	/* Swap in the new map and return. */
	mutex_spin_enter(&iop->sc_intrlock);
	tofree = sc->sc_tidmap;
	sc->sc_tidmap = tidmap;
	mutex_spin_exit(&iop->sc_intrlock);

	if (tofree != NULL)
		free(tofree, M_DEVBUF);

	sc->sc_chgind = iop->sc_chgind;
	return (0);
}

/*
 * Re-scan the bus; to be called from a higher level (e.g. scsipi).
 */
static int
iopsp_rescan(struct iopsp_softc *sc)
{
	struct iop_softc *iop;
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	int rv;

	iop = (struct iop_softc *)device_parent(&sc->sc_dv);

	mutex_enter(&iop->sc_conflock);
	im = iop_msg_alloc(iop, IM_WAIT);
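
	/*
	 * Post an HBA bus-scan request to our port and allow up to five
	 * minutes for it to complete, then re-read the LCT and rebuild
	 * the maps.
	 */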
	mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_HBA_BUS_SCAN);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(iop, im, &mf, 5*60*1000);
	iop_msg_free(iop, im);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "bus rescan failed (error %d)\n",
		    rv);

	if ((rv = iop_lct_get(iop)) == 0)
		rv = iopsp_reconfig(&sc->sc_dv);

	mutex_exit(&iop->sc_conflock);
	return (rv);
}

/*
 * Start a SCSI command.
 */
static void
iopsp_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		     void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct iopsp_softc *sc;
	struct iop_msg *im;
	struct iop_softc *iop;
	struct i2o_scsi_scb_exec *mf;
	int error, flags, tid;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (void *)chan->chan_adapter->adapt_dev;
	iop = (struct iop_softc *)device_parent(&sc->sc_dv);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("iopsp_scsi_request run_xfer\n"));

		tid = IOPSP_TIDMAP(sc->sc_tidmap, periph->periph_target,
		    periph->periph_lun);
		if (tid == IOPSP_TID_ABSENT || tid == IOPSP_TID_INUSE) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			return;
		}

		/* Need to reset the target? */
		if ((flags & XS_CTL_RESET) != 0) {
			if (iop_simple_cmd(iop, tid, I2O_SCSI_DEVICE_RESET,
			    sc->sc_ii.ii_ictx, 1, 30*1000) != 0) {
				aprint_error_dev(&sc->sc_dv, "reset failed\n");
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;

			scsipi_done(xs);
			return;
		}

#if defined(I2ODEBUG) || defined(SCSIDEBUG)
		if (xs->cmdlen > sizeof(mf->cdb))
			panic("%s: CDB too large", device_xname(&sc->sc_dv));
#endif

		im = iop_msg_alloc(iop, IM_POLL_INTR |
		    IM_NOSTATUS | ((flags & XS_CTL_POLL) != 0 ? IM_POLL : 0));
		im->im_dvcontext = xs;

		mf = (struct i2o_scsi_scb_exec *)mb;
		mf->msgflags = I2O_MSGFLAGS(i2o_scsi_scb_exec);
		mf->msgfunc = I2O_MSGFUNC(tid, I2O_SCSI_SCB_EXEC);
		mf->msgictx = sc->sc_ii.ii_ictx;
		mf->msgtctx = im->im_tctx;
		mf->flags = xs->cmdlen | I2O_SCB_FLAG_ENABLE_DISCONNECT |
		    I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
		mf->datalen = xs->datalen;
		memcpy(mf->cdb, xs->cmd, xs->cmdlen);
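
		/* Translate the scsipi tag type into the I2O SCB queue-tag flag. */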
		switch (xs->xs_tag_type) {
		case MSG_ORDERED_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_ORDERED_QUEUE_TAG;
			break;
		case MSG_SIMPLE_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_SIMPLE_QUEUE_TAG;
			break;
		case MSG_HEAD_OF_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_HEAD_QUEUE_TAG;
			break;
		default:
			break;
		}
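
		/* Map the data buffer, if any, and record the transfer direction. */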
		if (xs->datalen != 0) {
			error = iop_msg_map_bio(iop, im, mb, xs->data,
			    xs->datalen, (flags & XS_CTL_DATA_OUT) == 0);
			if (error) {
				xs->error = XS_DRIVER_STUFFUP;
				iop_msg_free(iop, im);
				scsipi_done(xs);
				return;
			}
			if ((flags & XS_CTL_DATA_IN) == 0)
				mf->flags |= I2O_SCB_FLAG_XFER_TO_DEVICE;
			else
				mf->flags |= I2O_SCB_FLAG_XFER_FROM_DEVICE;
		}

		if (iop_msg_post(iop, im, mb, xs->timeout)) {
			if (xs->datalen != 0)
				iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		}
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * The DDM takes care of this, and we can't modify its
		 * behaviour.
		 */
		break;
	}
}

#ifdef notyet
/*
 * Abort the specified I2O_SCSI_SCB_EXEC message and its associated SCB.
 */
static int
iopsp_scsi_abort(struct iopsp_softc *sc, int atid, struct iop_msg *aim)
{
	struct iop_msg *im;
	struct i2o_scsi_scb_abort mf;
	struct iop_softc *iop;
	int rv, s;

	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
	im = iop_msg_alloc(iop, IM_POLL);

	mf.msgflags = I2O_MSGFLAGS(i2o_scsi_scb_abort);
	mf.msgfunc = I2O_MSGFUNC(atid, I2O_SCSI_SCB_ABORT);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.tctxabort = aim->im_tctx;

	rv = iop_msg_post(iop, im, &mf, 30000);
	iop_msg_free(iop, im);

	return (rv);
}
#endif

/*
 * We have a message which has been processed and replied to by the IOP -
 * deal with it.
 */
static void
iopsp_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct scsipi_xfer *xs;
	struct iopsp_softc *sc;
	struct i2o_scsi_reply *rb;
	struct iop_softc *iop;
	u_int sl;

	sc = (struct iopsp_softc *)dv;
	xs = (struct scsipi_xfer *)im->im_dvcontext;
	iop = (struct iop_softc *)device_parent(dv);
	rb = reply;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("iopsp_intr\n"));

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
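		/*
		 * Translate the HBA and SCSI status codes reported by the
		 * IOP into scsipi error codes.
		 */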
		if (rb->hbastatus != I2O_SCSI_DSC_SUCCESS) {
			switch (rb->hbastatus) {
			case I2O_SCSI_DSC_ADAPTER_BUSY:
			case I2O_SCSI_DSC_SCSI_BUS_RESET:
			case I2O_SCSI_DSC_BUS_BUSY:
				xs->error = XS_BUSY;
				break;
			case I2O_SCSI_DSC_SELECTION_TIMEOUT:
				xs->error = XS_SELTIMEOUT;
				break;
			case I2O_SCSI_DSC_COMMAND_TIMEOUT:
			case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
			case I2O_SCSI_DSC_LUN_INVALID:
			case I2O_SCSI_DSC_SCSI_TID_INVALID:
				xs->error = XS_TIMEOUT;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			aprint_error_dev(&sc->sc_dv, "HBA status 0x%02x\n",
			    rb->hbastatus);
		} else if (rb->scsistatus != SCSI_OK) {
			switch (rb->scsistatus) {
			case SCSI_CHECK:
				xs->error = XS_SENSE;
				sl = le32toh(rb->senselen);
				if (sl > sizeof(xs->sense.scsi_sense))
					sl = sizeof(xs->sense.scsi_sense);
				memcpy(&xs->sense.scsi_sense, rb->sense, sl);
				break;
			case SCSI_QUEUE_FULL:
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->error = XS_NOERROR;

		xs->resid = xs->datalen - le32toh(rb->datalen);
		xs->status = rb->scsistatus;
	}

	/* Free the message wrapper and pass the news to scsipi. */
	if (xs->datalen != 0)
		iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);

	scsipi_done(xs);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
iopsp_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
    int flag, struct proc *p)
{
	int rv;

	switch (cmd) {
	case SCBUSIOLLSCAN:
		/*
		 * If it's boot time, the bus will have been scanned and the
		 * maps built.  Locking would stop re-configuration, but we
		 * want to fake success.
		 */
		if (curlwp != &lwp0)
			rv = iopsp_rescan(
			   (struct iopsp_softc *)chan->chan_adapter->adapt_dev);
		else
			rv = 0;
		break;

	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * The number of openings available to us has changed, so inform scsipi.
 */
static void
iopsp_adjqparam(device_t dv, int mpi)
{
	struct iopsp_softc *sc;
	struct iop_softc *iop;

	sc = device_private(dv);
	iop = device_private(device_parent(dv));

	mutex_spin_enter(&iop->sc_intrlock);
	sc->sc_adapter.adapt_openings += mpi - sc->sc_openings;
	sc->sc_openings = mpi;
	mutex_spin_exit(&iop->sc_intrlock);
}