/*	$NetBSD: mpt_netbsd.c,v 1.13 2007/08/04 22:01:06 tron Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */
/*
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.13 2007/08/04 22:01:06 tron Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */
static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}
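
/*
 * Editor's note on the function below: mpt_dma_mem_alloc() makes two
 * DMA-able allocations, a single page of reply frames and one large
 * request area that is carved into per-request slots.  Each
 * bus_dmamem/bus_dmamap step has a matching "fail_N" unwind label, so a
 * failure part-way through tears down exactly what was already set up.
 */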
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;
	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;
	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(&mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (char *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);
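
	/*
	 * Carve the request area into MPT_REQUEST_AREA-sized slots, one
	 * per request_t.  Note that the last MPT_SENSE_SIZE bytes of each
	 * slot are reserved for that request's autosense data: sense_pbuf
	 * and sense_vbuf point at the tail of the slot just consumed, so
	 * a single contiguous allocation covers both the message frames
	 * and the sense buffers.
	 */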
	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(&mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);
 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request, PAGE_SIZE);
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (ENOMEM);
}
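
/*
 * Editor's note: values popped off the reply FIFO are either "context
 * replies", where the value is simply the completing request's index,
 * or "address replies" (MPT_CONTEXT_REPLY bit set), where the value
 * encodes the location of a reply frame in the reply page.
 * MPT_REPLY_PTOV() maps the encoded value to a kernel virtual address,
 * and (reply << 1) is what gets handed back to the IOC when the frame
 * is freed with mpt_free_reply().
 */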
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}
void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", device_xname(&mpt->sc_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}
static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}
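
/*
 * Editor's note on the completion path below: a context reply means the
 * command completed cleanly and there is no reply frame to inspect; an
 * address reply carries a MSG_SCSI_IO_REPLY whose IOCStatus, SCSIState
 * and SCSIStatus fields are mapped onto scsipi error codes.  Replies
 * whose MsgContext has the high bit set belong to internal control
 * operations and are diverted to mpt_ctlop().
 */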
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}
	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;
	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}
	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			    (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}
 done:
	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}
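
/*
 * Editor's note on command submission below: mpt_run_xfer() allocates a
 * request_t, builds the SCSI I/O request message in its frame (setting
 * MsgContext = req->index, which is how mpt_done() finds the request
 * again), constructs the SGL and hands the frame to the IOC.  For
 * polled transfers (XS_CTL_POLL) it spins in mpt_poll() instead of
 * waiting for the completion interrupt.
 */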
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
	    (mpt->mpt_tag_enable & (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
	    (mpt->mpt_disc_enable & (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}
->dmap
->dm_nsegs
> MPT_NSGL_FIRST(mpt
)) {
779 int seg
, i
, nleft
= req
->dmap
->dm_nsegs
;
785 mpt_req
->DataLength
= xs
->datalen
;
786 flags
= MPI_SGE_FLAGS_SIMPLE_ELEMENT
;
787 if (xs
->xs_control
& XS_CTL_DATA_OUT
)
788 flags
|= MPI_SGE_FLAGS_HOST_TO_IOC
;
790 se
= (SGE_SIMPLE32
*) &mpt_req
->SGL
;
791 for (i
= 0; i
< MPT_NSGL_FIRST(mpt
) - 1;
795 memset(se
, 0, sizeof(*se
));
796 se
->Address
= req
->dmap
->dm_segs
[seg
].ds_addr
;
797 MPI_pSGE_SET_LENGTH(se
,
798 req
->dmap
->dm_segs
[seg
].ds_len
);
800 if (i
== MPT_NSGL_FIRST(mpt
) - 2)
801 tf
|= MPI_SGE_FLAGS_LAST_ELEMENT
;
802 MPI_pSGE_SET_FLAGS(se
, tf
);
807 * Tell the IOC where to find the first chain element.
809 mpt_req
->ChainOffset
=
810 ((char *)se
- (char *)mpt_req
) >> 2;
813 * Until we're finished with all segments...
819 * Construct the chain element that points to
822 ce
= (SGE_CHAIN32
*) se
++;
823 if (nleft
> MPT_NSGL(mpt
)) {
824 ntodo
= MPT_NSGL(mpt
) - 1;
825 ce
->NextChainOffset
= (MPT_RQSL(mpt
) -
826 sizeof(SGE_SIMPLE32
)) >> 2;
827 ce
->Length
= MPT_NSGL(mpt
)
828 * sizeof(SGE_SIMPLE32
);
831 ce
->NextChainOffset
= 0;
833 * sizeof(SGE_SIMPLE32
);
835 ce
->Address
= req
->req_pbuf
+
836 ((char *)se
- (char *)mpt_req
);
837 ce
->Flags
= MPI_SGE_FLAGS_CHAIN_ELEMENT
;
838 for (i
= 0; i
< ntodo
; i
++, se
++, seg
++) {
841 memset(se
, 0, sizeof(*se
));
843 req
->dmap
->dm_segs
[seg
].ds_addr
;
844 MPI_pSGE_SET_LENGTH(se
,
845 req
->dmap
->dm_segs
[seg
].ds_len
);
847 if (i
== ntodo
- 1) {
849 MPI_SGE_FLAGS_LAST_ELEMENT
;
850 if (ce
->NextChainOffset
== 0) {
852 MPI_SGE_FLAGS_END_OF_LIST
|
853 MPI_SGE_FLAGS_END_OF_BUFFER
;
856 MPI_pSGE_SET_FLAGS(se
, tf
);
860 bus_dmamap_sync(mpt
->sc_dmat
, req
->dmap
, 0,
861 req
->dmap
->dm_mapsize
,
862 (xs
->xs_control
& XS_CTL_DATA_IN
) ?
864 : BUS_DMASYNC_PREWRITE
);
869 mpt_req
->DataLength
= xs
->datalen
;
870 flags
= MPI_SGE_FLAGS_SIMPLE_ELEMENT
;
871 if (xs
->xs_control
& XS_CTL_DATA_OUT
)
872 flags
|= MPI_SGE_FLAGS_HOST_TO_IOC
;
874 /* Copy the segments into our SG list. */
875 se
= (SGE_SIMPLE32
*) &mpt_req
->SGL
;
876 for (i
= 0; i
< req
->dmap
->dm_nsegs
;
880 memset(se
, 0, sizeof(*se
));
881 se
->Address
= req
->dmap
->dm_segs
[i
].ds_addr
;
882 MPI_pSGE_SET_LENGTH(se
,
883 req
->dmap
->dm_segs
[i
].ds_len
);
885 if (i
== req
->dmap
->dm_nsegs
- 1) {
887 MPI_SGE_FLAGS_LAST_ELEMENT
|
888 MPI_SGE_FLAGS_END_OF_BUFFER
|
889 MPI_SGE_FLAGS_END_OF_LIST
;
891 MPI_pSGE_SET_FLAGS(se
, tf
);
893 bus_dmamap_sync(mpt
->sc_dmat
, req
->dmap
, 0,
894 req
->dmap
->dm_mapsize
,
895 (xs
->xs_control
& XS_CTL_DATA_IN
) ?
897 : BUS_DMASYNC_PREWRITE
);
901 * No data to transfer; just make a single simple SGL
904 SGE_SIMPLE32
*se
= (SGE_SIMPLE32
*) &mpt_req
->SGL
;
905 memset(se
, 0, sizeof(*se
));
906 MPI_pSGE_SET_FLAGS(se
,
907 (MPI_SGE_FLAGS_LAST_ELEMENT
| MPI_SGE_FLAGS_END_OF_BUFFER
|
908 MPI_SGE_FLAGS_SIMPLE_ELEMENT
| MPI_SGE_FLAGS_END_OF_LIST
));
911 if (mpt
->verbose
> 1)
912 mpt_print_scsi_io_request(mpt_req
);
915 if (__predict_true((xs
->xs_control
& XS_CTL_POLL
) == 0))
916 callout_reset(&xs
->xs_callout
,
917 mstohz(xs
->timeout
), mpt_timeout
, req
);
918 mpt_send_cmd(mpt
, req
);
921 if (__predict_true((xs
->xs_control
& XS_CTL_POLL
) == 0))
925 * If we can't use interrupts, poll on completion.
927 if (mpt_poll(mpt
, xs
, xs
->timeout
))
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (mpt->is_fc) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}
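
/*
 * Editor's note: the sync "factor" and "offset" used above (and the
 * "period"/"offset" read back below) are the raw MPI sync period factor
 * and offset fields from Port Page 0 and Device Page 0, not nanosecond
 * values; smaller factors mean faster timings, which is why the QAS/IU
 * and DT bits in mpt_set_xfer_mode() are keyed off factor thresholds
 * (see the MPI specification for the exact encoding).
 */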
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}
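
/*
 * Editor's note: control operations (event notification, event ack,
 * port enable, config) carry the originating request's index in
 * MsgContext with the high bit set; mpt_done() spots that bit and
 * routes the reply here instead of through the SCSI I/O completion
 * path.
 */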
static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(&mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */
/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/
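
/*
 * Editor's note: mpt_scsipi_request() is the scsipi entry point; it
 * dispatches the three adapter request types to the helpers above.
 * ADAPTER_REQ_GROW_RESOURCES is not supported because the request pool
 * is sized once, at attach time, from the IOC's global credit count.
 */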
static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}
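
/*
 * Editor's example (values assumed purely for illustration): with a
 * PAGE_SIZE of 4096 and an MPT_SGL_MAX of 64, mpt_minphys() below would
 * clip transfers to (64 - 1) * 4096 = 258048 bytes, leaving one SGL
 * entry to spare for a buffer that does not start on a page boundary.
 */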
static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}