/* $NetBSD: isp_netbsd.c,v 1.80 2009/06/25 23:44:02 mjacob Exp $ */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 *
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.80 2009/06/25 23:44:02 mjacob Exp $");

#include <dev/ic/isp_netbsd.h>
#include <dev/ic/isp_ioctl.h>
#include <sys/scsiio.h>

#include <sys/timevar.h>
/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 * which reduces to
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
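/*
 * A worked example, assuming hz = 100: a 250ms command timeout gives
 * (250 / 1000) * 100 = 0 ticks from the scaling alone, so the trailing
 * "+ 3 * hz" term is what guarantees a watchdog of at least 3 seconds.
 */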
static void isp_config_interrupts(device_t);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);
static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_gdt(void *);
static void isp_ldt(void *);
static void isp_make_here(ispsoftc_t *, int);
static void isp_make_gone(ispsoftc_t *, int);
static void isp_fc_worker(void *);
static const char *roles[4] = {
    "(none)", "Target", "Initiator", "Target/Initiator"
};
static const char prom3[] =
    "PortID 0x%06x Departed from Target %u because of %s";
int isp_change_is_bad = 0;	/* "changed" devices are bad */
int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
static int isp_fabric_hysteresis = 5;
#define	isp_change_is_bad	0
/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	device_t self = isp->isp_osinfo.dev;
	int i;

	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo.adapter.adapt_dev = self;
	isp->isp_osinfo.adapter.adapt_openings = isp->isp_maxcmds;
	isp->isp_osinfo.loop_down_limit = 300;

	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo.adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo.adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo.adapter.adapt_request = isprequest;
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo.adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo.adapter.adapt_minphys = ispminphys;
	}
	if (IS_FC(isp)) {
		callout_init(&isp->isp_osinfo.gdt, 0);
		callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
		callout_init(&isp->isp_osinfo.ldt, 0);
		callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);

		if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
		    &isp->isp_osinfo.thread, "%s:fc_thrd",
		    device_xname(self))) {
			isp_prt(isp, ISP_LOGERR,
			    "unable to create FC worker thread");
			return;
		}
	}
	for (i = 0; i != isp->isp_osinfo.adapter.adapt_nchannels; i++) {
		isp->isp_osinfo.chan[i].chan_adapter =
		    &isp->isp_osinfo.adapter;
		isp->isp_osinfo.chan[i].chan_bustype = &scsi_bustype;
		isp->isp_osinfo.chan[i].chan_channel = i;
		/*
		 * Until the midlayer is fixed to use REPORT LUNS,
		 * limit to 8 luns.
		 */
		isp->isp_osinfo.chan[i].chan_nluns = min(isp->isp_maxluns, 8);
		if (IS_FC(isp)) {
			isp->isp_osinfo.chan[i].chan_ntargets = MAX_FC_TARG;
			if (ISP_CAP_2KLOGIN(isp) == 0 && MAX_FC_TARG > 256) {
				isp->isp_osinfo.chan[i].chan_ntargets = 256;
			}
			isp->isp_osinfo.chan[i].chan_id = MAX_FC_TARG;
		} else {
			isp->isp_osinfo.chan[i].chan_ntargets = MAX_TARGETS;
			isp->isp_osinfo.chan[i].chan_id =
			    SDPARAM(isp, i)->isp_initiator_id;
			(void) isp_control(isp, ISPCTL_RESET_BUS, i);
		}
	}
	/*
	 * Defer enabling mailbox interrupts until later.
	 */
	config_interrupts(self, isp_config_interrupts);
}
static void
isp_config_interrupts(device_t self)
{
	int i;
	struct ispsoftc *isp = device_private(self);

	isp->isp_osinfo.mbox_sleep_ok = 1;

	if (IS_FC(isp) && (FCPARAM(isp, 0)->isp_fwstate != FW_READY ||
	    FCPARAM(isp, 0)->isp_loopstate != LOOP_READY)) {
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "Starting Initial Loop Down Timer");
		callout_schedule(&isp->isp_osinfo.ldt, isp_quickboot_time * hz);
	}
	/*
	 * And attach children (if any).
	 */
	for (i = 0; i < isp->isp_osinfo.adapter.adapt_nchannels; i++) {
		config_found(self, &isp->isp_osinfo.chan[i], scsiprint);
	}
}
static void
ispminphys_1020(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}
static void
ispminphys(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 30)) {
		bp->b_bcount = (1 << 30);
	}
	minphys(bp);
}
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);
	int nr, bus, retval = ENOTTY;

	switch (cmd) {
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_GETROLE:
		bus = *(int *)addr;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		if (IS_FC(isp)) {
			*(int *)addr = FCPARAM(isp, bus)->role;
		} else {
			*(int *)addr = SDPARAM(isp, bus)->role;
		}
		retval = 0;
		break;
	case ISP_SETROLE:
		nr = *(int *)addr;
		bus = nr >> 8;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		nr &= 0xff;
		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
			retval = EINVAL;
			break;
		}
		if (IS_FC(isp)) {
			*(int *)addr = FCPARAM(isp, bus)->role;
			FCPARAM(isp, bus)->role = nr;
		} else {
			*(int *)addr = SDPARAM(isp, bus)->role;
			SDPARAM(isp, bus)->role = nr;
		}
		retval = 0;
		break;
	case ISP_RESCAN:
		if (IS_FC(isp)) {
			bus = *(int *)addr;
			if (bus < 0 || bus >= isp->isp_nchan) {
				retval = -ENXIO;
				break;
			}
			if (isp_fc_runstate(isp, bus, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			bus = *(int *)addr;
			if (bus < 0 || bus >= isp->isp_nchan) {
				retval = -ENXIO;
				break;
			}
			if (isp_control(isp, ISPCTL_SEND_LIP, bus)) {
				retval = EIO;
			} else {
				retval = 0;
			}
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		fcportdb_t *lp;

		if (ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
		if (lp->state == FC_PORTDB_STATE_VALID) {
			ifc->role = lp->roles;
			ifc->loopid = lp->handle;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		break;
	}
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		ISP_MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		bus = hba->fc_channel;

		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
		hba->fc_nchannels = isp->isp_nchan;
		hba->fc_nports = isp->isp_nchan;	/* XXXX 24XX STUFF? XXX */
		if (IS_FC(isp)) {
			hba->fc_speed = FCPARAM(isp, bus)->isp_gbspeed;
			hba->fc_topology = FCPARAM(isp, bus)->isp_topo + 1;
			hba->fc_loopid = FCPARAM(isp, bus)->isp_loopid;
			hba->nvram_node_wwn = FCPARAM(isp, bus)->isp_wwnn_nvram;
			hba->nvram_port_wwn = FCPARAM(isp, bus)->isp_wwpn_nvram;
			hba->active_node_wwn = FCPARAM(isp, bus)->isp_wwnn;
			hba->active_port_wwn = FCPARAM(isp, bus)->isp_wwpn;
		} else {
			hba->fc_topology = 0;
			hba->nvram_node_wwn = 0ull;
			hba->nvram_port_wwn = 0ull;
			hba->active_node_wwn = 0ull;
			hba->active_port_wwn = 0ull;
		}
		retval = 0;
		break;
	}
	case ISP_TSK_MGMT:
	{
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;
		int needmarker;

		if (IS_SCSI(isp)) {
			break;
		}

		bus = fct->chan;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (ISP_CAP_2KLOGIN(isp) == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case IPT_CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case IPT_TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case IPT_LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			if (needmarker) {
				FCPARAM(isp, bus)->sendmarker = 1;
			}
			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			if (retval) {
				retval = EIO;
			}
		}
		break;
	}
	case ISP_FC_GETDLIST:
	{
		isp_dlist_t local, *ua;
		uint16_t nph, nphe, count, channel, lim;
		struct wwnpair pair, *uptr;

		if (IS_SCSI(isp)) {
			retval = EINVAL;
			break;
		}

		ua = *(isp_dlist_t **)addr;
		if (copyin(ua, &local, sizeof (isp_dlist_t))) {
			retval = EFAULT;
			break;
		}
		lim = local.count;
		channel = local.channel;

		ua = *(isp_dlist_t **)addr;
		uptr = ua->wwns;

		if (ISP_CAP_2KLOGIN(isp)) {
			nphe = NPH_MAX_2K;
		} else {
			nphe = NPH_MAX;
		}
		for (count = 0, nph = 0; count < lim && nph != nphe; nph++) {
			retval = isp_control(isp, ISPCTL_GET_NAMES, channel,
			    nph, &pair.wwnn, &pair.wwpn);
			if (retval || (pair.wwpn == INI_NONE &&
			    pair.wwnn == INI_NONE)) {
				retval = 0;
				continue;
			}
			if (copyout(&pair, (void *)uptr++, sizeof (pair))) {
				retval = EFAULT;
				break;
			}
			count++;
		}
		if (retval == 0) {
			if (copyout(&count, (void *)&ua->count,
			    sizeof (count))) {
				retval = EFAULT;
			}
		}
		break;
	}
	case SCBUSIORESET:
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
			retval = EIO;
		} else {
			retval = 0;
		}
		break;
	default:
		break;
	}
	return (retval);
}
static void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	volatile uint8_t ombi;
	int chan, lim, delay_time;

	if (isp->isp_state < ISP_RUNSTATE) {
		ISP_DISABLE_INTS(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_ENABLE_INTS(isp);
			isp_prt(isp, ISP_LOGERR, "isp not at init state");
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISP_ENABLE_INTS(isp);
	}
	chan = XS_CHANNEL(xs);
	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we don't yet have a known loop state.
	 */
	if (IS_FC(isp) && (FCPARAM(isp, chan)->isp_fwstate != FW_READY ||
	    FCPARAM(isp, chan)->isp_loopstate != LOOP_READY) &&
	    isp->isp_osinfo.thread == NULL) {
		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = 0;
		}

		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		}

		if (isp_fc_runstate(isp, XS_CHANNEL(xs), delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.mbox_sleep_ok = ombi;
			}
			if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.blocked = isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "THAW QUEUES @ LINE %d", __LINE__);
				scsipi_channel_thaw(&isp->isp_osinfo.chan[chan],
				    1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		}
	}
	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN,
		    "I/O while blocked with retries %d", xs->xs_retries);
		if (xs->xs_retries) {
			xs->error = XS_REQUEUE;
		} else {
			XS_SETERR(xs, HBA_SELTIMEOUT);
		}
		scsipi_done(xs);
		return;
	}
	if (xs->xs_control & XS_CTL_POLL) {
		ombi = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		if (xs->xs_control & XS_CTL_POLL) {
			isp_polled_cmd_wait(isp, xs);
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		} else if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "FREEZE QUEUES @ LINE %d", __LINE__);
		for (chan = 0; chan < isp->isp_nchan; chan++) {
			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only)
		 *
		 * If we've never seen loop up, see if we've been down
		 * quickboot time, otherwise wait loop down limit time.
		 * If so, then we start giving up on commands.
		 */
		if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
			lim = isp_quickboot_time;
		} else {
			lim = isp->isp_osinfo.loop_down_limit;
		}
		if (isp->isp_osinfo.loop_down_time >= lim) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER->SELTIMEOUT for %d (%d >= %d)", XS_TGT(xs),
			    isp->isp_osinfo.loop_down_time, lim);
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		scsipi_done(xs);
		break;
	}
}
static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		ispcmd(isp, (XS_T *) arg);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		int dflags = 0;
		sdparam *sdp = SDPARAM(isp, chan->chan_channel);

		if (xm->xm_mode & PERIPH_CAP_TQING)
			dflags |= DPARM_TQING;
		if (xm->xm_mode & PERIPH_CAP_WIDE16)
			dflags |= DPARM_WIDE;
		if (xm->xm_mode & PERIPH_CAP_SYNC)
			dflags |= DPARM_SYNC;

		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
		sdp->isp_devparam[xm->xm_target].dev_update = 1;

		isp_prt(isp, ISP_LOGDEBUG1,
		    "isprequest: device flags 0x%x for %d.%d.X",
		    dflags, chan->chan_channel, xm->xm_target);
		break;
	}
	}
}
static void
isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
{
	int infinite = 0, mswait;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0) {
		infinite = 1;
	}

	while (mswait || infinite) {
		uint32_t isr;
		uint16_t sema, mbox;

		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		ISP_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened, so abort the command.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			isp_prt(isp, ISP_LOGERR, "polled command timed out");
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
}
) == 0) {
771 struct ispsoftc
*isp
= XS_ISP(xs
);
772 callout_stop(&xs
->xs_callout
);
773 if (XS_CMD_GRACE_P(xs
)) {
774 isp_prt(isp
, ISP_LOGDEBUG1
,
775 "finished command on borrowed time");
779 * Fixup- if we get a QFULL, we need
780 * to set XS_BUSY as the error.
782 if (xs
->status
== SCSI_QUEUE_FULL
) {
785 if (isp
->isp_osinfo
.paused
) {
787 isp
->isp_osinfo
.paused
= 0;
788 isp_prt(isp
, ISP_LOGSANCFG
|ISP_LOGDEBUG0
,
789 "THAW QUEUES @ LINE %d", __LINE__
);
790 for (i
= 0; i
< isp
->isp_nchan
; i
++) {
791 scsipi_channel_timed_thaw(&isp
->isp_osinfo
.chan
[i
]);
794 if (xs
->error
== XS_DRIVER_STUFFUP
) {
795 isp_prt(isp
, ISP_LOGERR
,
796 "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
797 XS_CHANNEL(xs
), XS_TGT(xs
), XS_LUN(xs
),
798 XS_CDBP(xs
)[0], (long) XS_XFRLEN(xs
));
static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	uint32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint32_t isr;
		uint16_t sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			return;
		}

		XS_CMD_S_WDOG(xs);

		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog cleanup for handle 0x%x", handle);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog timeout for handle 0x%x", handle);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			void *qe;
			isp_marker_t local, *mp = &local;

			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout on handle %x", handle);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			qe = isp_getrqentry(isp);
			if (qe == NULL) {
				XS_CMD_S_GRACE(xs);
				return;
			}
			XS_CMD_S_GRACE(xs);
			ISP_MEMZERO((void *) mp, sizeof (*mp));
			mp->mrk_header.rqs_entry_count = 1;
			mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->mrk_modifier = SYNC_ALL;
			mp->mrk_target = XS_CHANNEL(xs) << 7;
			isp_put_marker(isp, mp, qe);
			ISP_SYNC_REQUEST(isp);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
}
/*
 * Gone Device Timer Function- when we have decided that a device has gone
 * away, we wait a specific period of time prior to telling the OS it has
 * gone away.
 *
 * This timer function fires once a second and then scans the port database
 * for devices that are marked dead but still have a virtual target assigned.
 * We decrement a counter for that port database entry, and when it hits zero,
 * we tell the OS the device has gone away.
 */
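/*
 * For example, if gone_device_time were 30, a zombie port database entry
 * would survive roughly 30 of these one-second scans (about 30 seconds)
 * before isp_make_gone() is finally called for its target.
 */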
static void
isp_gdt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt, more_to_do = 0;

	isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, 0)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
			continue;
		}
		if (lp->dev_map_idx == 0) {
			continue;
		}
		if (lp->new_reserved == 0) {
			continue;
		}
		lp->new_reserved -= 1;
		if (lp->new_reserved != 0) {
			more_to_do++;
			continue;
		}
		tgt = lp->dev_map_idx - 1;
		FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
		lp->dev_map_idx = 0;
		lp->state = FC_PORTDB_STATE_NIL;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Gone Device Timeout");
		isp_make_gone(isp, tgt);
	}
	if (more_to_do) {
		callout_schedule(&isp->isp_osinfo.gdt, hz);
	} else {
		isp->isp_osinfo.gdt_running = 0;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "stopping Gone Device Timer");
	}
}
/*
 * Loop Down Timer Function- when loop goes down, a timer is started and
 * after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
 *
 * We don't clear the devices out of our port database because, when loop
 * comes back up, we have to do some actual cleanup with the chip at that
 * point (implicit PLOGO, e.g., to get the chip's port database state right).
 */
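/*
 * Note that the wakeup() at the end of this handler targets
 * &isp->isp_osinfo.thread, the same address the FC worker thread sleeps
 * on in isp_fc_worker(), so expiry of this timer also kicks that thread
 * into re-evaluating loop state.
 */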
static void
isp_ldt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt;

	isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");

	/*
	 * Notify the OS of all targets that we now consider to have departed.
	 */
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, 0)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
			continue;
		}
		if (lp->dev_map_idx == 0) {
			continue;
		}

		/*
		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
		 */

		/*
		 * Mark that we've announced that this device is gone....
		 */
		lp->reserved = 1;

		/*
		 * but *don't* change the state of the entry. Just clear
		 * any target id stuff and announce to CAM that the
		 * device is gone. This way any necessary PLOGO stuff
		 * will happen when loop comes back up.
		 */
		tgt = lp->dev_map_idx - 1;
		FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
		lp->dev_map_idx = 0;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Loop Down Timeout");
		isp_make_gone(isp, tgt);
	}

	/*
	 * The loop down timer has expired. Wake up the kthread
	 * to notice that fact (or make it false).
	 */
	isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
	wakeup(&isp->isp_osinfo.thread);
}
static void
isp_make_here(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
}

static void
isp_make_gone(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
}
static void
isp_fc_worker(void *arg)
{
	void scsipi_run_queue(struct scsipi_channel *);
	ispsoftc_t *isp = arg;
	int slp = 0;
	int chan = 0;

	/*
	 * The first loop is for our usage where we have yet to have
	 * gotten good fibre channel state.
	 */
	while (isp->isp_osinfo.thread != NULL) {
		int sok, lb, lim;

		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
		sok = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 1;
		lb = isp_fc_runstate(isp, chan, 250000);
		isp->isp_osinfo.mbox_sleep_ok = sok;
		if (lb) {
			/*
			 * Increment loop down time by the last sleep interval
			 */
			isp->isp_osinfo.loop_down_time += slp;

			if (lb < 0) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC loop not up (down count %d)",
				    isp->isp_osinfo.loop_down_time);
			} else {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC got to %d (down count %d)",
				    lb, isp->isp_osinfo.loop_down_time);
			}

			/*
			 * If we've never seen loop up and we've waited longer
			 * than quickboot time, or we've seen loop up but we've
			 * waited longer than loop_down_limit, give up and go
			 * to sleep until loop comes up.
			 */
			if (FCPARAM(isp, 0)->loop_seen_once == 0) {
				lim = isp_quickboot_time;
			} else {
				lim = isp->isp_osinfo.loop_down_limit;
			}
			if (isp->isp_osinfo.loop_down_time >= lim) {
				/*
				 * If we're now past our limit, release
				 * the queues and let them come in and
				 * either get HBA_SELTIMEOUT or cause
				 * another freeze.
				 */
				isp->isp_osinfo.blocked = 1;
				slp = 5;
			} else if (isp->isp_osinfo.loop_down_time < 10) {
				slp = 1;
			} else if (isp->isp_osinfo.loop_down_time < 30) {
				slp = 5;
			} else if (isp->isp_osinfo.loop_down_time < 60) {
				slp = 10;
			} else if (isp->isp_osinfo.loop_down_time < 120) {
				slp = 20;
			} else {
				slp = 30;
			}
		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FC state OK");
			isp->isp_osinfo.loop_down_time = 0;
			slp = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
		}

		/*
		 * If we'd frozen the queues, unfreeze them now so that
		 * we can start getting commands. If the FC state isn't
		 * okay yet, they'll hit that in isp_start which will
		 * freeze the queues again.
		 */
		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
		}

		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
		tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);

		/*
		 * If slp is zero, we're waking up for the first time after
		 * things have been okay. In this case, we set a deferral state
		 * for all commands and delay hysteresis seconds before starting
		 * the FC state evaluation. This gives the loop/fabric a chance
		 * to settle.
		 */
		if (slp == 0 && isp_fabric_hysteresis) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "sleep hysteresis tick time %d",
			    isp_fabric_hysteresis * hz);
			(void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
			    (isp_fabric_hysteresis * hz));
		}
	}

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);

	kthread_exit(0);
}
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	/*
	 * Leave with interrupts disabled.
	 */
	ISP_DISABLE_INTS(isp);
}
void
isp_async(struct ispsoftc *isp, ispasync_t cmd, ...)
{
	int bus, tgt;
	fcportdb_t *lp;
	va_list ap;
	const char *msg = NULL;
	static const char prom[] =
	    "PortID 0x%06x handle 0x%x role %s %s\n"
	    "	WWNN 0x%08x%08x WWPN 0x%08x%08x";
	static const char prom2[] =
	    "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
	    "	WWNN 0x%08x%08x WWPN 0x%08x%08x";

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	{
		sdparam *sdp;
		int flags;
		struct scsipi_xfer_mode xm;

		va_start(ap, cmd);
		bus = va_arg(ap, int);
		tgt = va_arg(ap, int);
		va_end(ap);
		sdp = SDPARAM(isp, bus);
		flags = sdp->isp_devparam[tgt].actv_flags;

		xm.xm_mode = 0;
		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
		xm.xm_target = tgt;

		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (flags & DPARM_WIDE)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (flags & DPARM_TQING)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(&isp->isp_osinfo.chan[bus],
		    ASYNC_EVENT_XFER_MODE, &xm);
		break;
	}
	case ISPASYNC_BUS_RESET:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		scsipi_async_event(&isp->isp_osinfo.chan[bus],
		    ASYNC_EVENT_RESET, NULL);
		break;
	case ISPASYNC_LIP:
		msg = "LIP Received";
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_RESET:
		if (msg == NULL)
			msg = "LOOP Reset Received";
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_DOWN:
		if (msg == NULL)
			msg = "Loop Down";
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);

		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running and interrupts that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.mbox_sleep_ok &&
		    isp->isp_osinfo.blocked == 0 &&
		    isp->isp_osinfo.thread) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			if (callout_pending(&isp->isp_osinfo.ldt) == 0) {
				callout_schedule(&isp->isp_osinfo.ldt,
				    isp->isp_osinfo.loop_down_limit * hz);
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "Starting Loop Down Timer");
			}
		}
		isp_prt(isp, ISP_LOGINFO, msg);
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_DEV_ARRIVED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
			int dbidx = lp - FCPARAM(isp, bus)->portdb;
			int i;

			for (i = 0; i < MAX_FC_TARG; i++) {
				if (i >= FL_ID && i <= SNS_ID) {
					continue;
				}
				if (FCPARAM(isp, bus)->isp_dev_map[i] == 0) {
					break;
				}
			}
			if (i < MAX_FC_TARG) {
				FCPARAM(isp, bus)->isp_dev_map[i] = dbidx + 1;
				lp->dev_map_idx = i + 1;
			} else {
				isp_prt(isp, ISP_LOGWARN, "out of target ids");
				isp_dump_portdb(isp, bus);
			}
		}
		if (lp->dev_map_idx) {
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
			isp_make_here(isp, tgt);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_CHANGED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (isp_change_is_bad) {
			lp->state = FC_PORTDB_STATE_NIL;
			if (lp->dev_map_idx) {
				tgt = lp->dev_map_idx - 1;
				FCPARAM(isp, bus)->isp_dev_map[tgt] = 0;
				lp->dev_map_idx = 0;
				isp_prt(isp, ISP_LOGCONFIG, prom3,
				    lp->portid, tgt, "change is bad");
				isp_make_gone(isp, tgt);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles],
				    "changed and departed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		} else {
			lp->portid = lp->new_portid;
			lp->roles = lp->new_roles;
			if (lp->dev_map_idx) {
				int t = lp->dev_map_idx - 1;
				FCPARAM(isp, bus)->isp_dev_map[t] =
				    (lp - FCPARAM(isp, bus)->portdb) + 1;
				tgt = lp->dev_map_idx - 1;
				isp_prt(isp, ISP_LOGCONFIG, prom2,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed at", tgt,
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		}
		break;
	case ISPASYNC_DEV_STAYED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (lp->dev_map_idx) {
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_GONE:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		/*
		 * If this has a virtual target and we haven't marked it
		 * that we're going to have isp_gdt tell the OS it's gone,
		 * set the isp_gdt timer running on it.
		 *
		 * If it isn't marked that isp_gdt is going to get rid of it,
		 * announce that it's gone.
		 */
		if (lp->dev_map_idx && lp->reserved == 0) {
			lp->reserved = 1;
			lp->new_reserved = isp->isp_osinfo.gone_device_time;
			lp->state = FC_PORTDB_STATE_ZOMBIE;
			if (isp->isp_osinfo.gdt_running == 0) {
				isp->isp_osinfo.gdt_running = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "starting Gone Device Timer");
				callout_schedule(&isp->isp_osinfo.gdt, hz);
			}
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "gone zombie at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else if (lp->reserved == 0) {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "departed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
	{
		int opt;

		va_start(ap, cmd);
		bus = va_arg(ap, int);
		opt = va_arg(ap, int);
		va_end(ap);

		if (opt == ISPASYNC_CHANGE_PDB) {
			msg = "Port Database Changed";
		} else if (opt == ISPASYNC_CHANGE_SNS) {
			msg = "Name Server Database Changed";
		} else {
			msg = "Other Change Notify";
		}

		/*
		 * If the loop down timer is running, cancel it.
		 */
		if (callout_pending(&isp->isp_osinfo.ldt)) {
			callout_stop(&isp->isp_osinfo.ldt);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "Stopping Loop Down Timer");
		}
		isp_prt(isp, ISP_LOGINFO, msg);

		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}

		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		uint16_t mbox1;

		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			bus = ISP_READ(isp, OUTMAILBOX6);
		} else {
			bus = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
		    bus, mbox1);
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
		}
		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp_reinit(isp);
		isp->isp_osinfo.mbox_sleep_ok = mbox1;
		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
		break;
	}
	default:
		break;
	}
}
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;

	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", device_xname(isp->isp_osinfo.dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}
void
isp_lock(struct ispsoftc *isp)
{
	int s = splbio();

	if (isp->isp_osinfo.islocked++ == 0) {
		isp->isp_osinfo.splsaved = s;
	} else {
		splx(s);
	}
}
void
isp_unlock(struct ispsoftc *isp)
{
	if (isp->isp_osinfo.islocked-- <= 1) {
		isp->isp_osinfo.islocked = 0;
		splx(isp->isp_osinfo.splsaved);
	}
}
uint64_t
isp_microtime_sub(struct timeval *b, struct timeval *a)
{
	struct timeval x;
	uint64_t elapsed;

	timersub(b, a, &x);
	elapsed = GET_NANOSEC(&x);
	if (elapsed == 0) {
		elapsed++;
	}
	return (elapsed);
}
int
isp_mbox_acquire(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mboxbsy) {
		return (1);
	} else {
		isp->isp_osinfo.mboxcmd_done = 0;
		isp->isp_osinfo.mboxbsy = 1;
		return (0);
	}
}
void
isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
{
	unsigned int usecs = mbp->timeout;
	unsigned int maxc, olim, ilim;
	struct timeval start;

	if (usecs == 0) {
		usecs = MBCMD_DEFAULT_TIMEOUT;
	}
	maxc = isp->isp_mbxwrk0 + 1;
	microtime(&start);
	if (isp->isp_osinfo.mbox_sleep_ok) {
		int to;
		struct timeval tv;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		for (olim = 0; olim < maxc; olim++) {
			tv.tv_sec += (usecs / 1000000);
			tv.tv_usec += (usecs % 1000000);
			if (tv.tv_usec >= 1000000) {
				tv.tv_sec++;
				tv.tv_usec -= 1000000;
			}
		}
		timeradd(&tv, &start, &tv);
		to = tvtohz(&tv);
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp->isp_osinfo.mbox_sleeping = 1;
		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
		isp->isp_osinfo.mbox_sleeping = 0;
		isp->isp_osinfo.mbox_sleep_ok = 1;
	} else {
		for (olim = 0; olim < maxc; olim++) {
			for (ilim = 0; ilim < usecs; ilim += 100) {
				uint32_t isr;
				uint16_t sema, mbox;
				if (isp->isp_osinfo.mboxcmd_done) {
					break;
				}
				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
					isp_intr(isp, isr, sema, mbox);
					if (isp->isp_osinfo.mboxcmd_done) {
						break;
					}
				}
				ISP_DELAY(100);
			}
			if (isp->isp_osinfo.mboxcmd_done) {
				break;
			}
		}
	}
	if (isp->isp_osinfo.mboxcmd_done == 0) {
		struct timeval finish, elapsed;

		microtime(&finish);
		timersub(&finish, &start, &elapsed);
		isp_prt(isp, ISP_LOGWARN,
		    "%s Mailbox Command (0x%x) Timeout (%uus actual)",
		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
		    isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
		    elapsed.tv_usec);
		mbp->param[0] = MBOX_TIMEOUT;
		isp->isp_osinfo.mboxcmd_done = 1;
	}
}
void
isp_mbox_notify_done(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mbox_sleeping) {
		wakeup(&isp->isp_mbxworkp);
	}
	isp->isp_osinfo.mboxcmd_done = 1;
}
void
isp_mbox_release(ispsoftc_t *isp)
{
	isp->isp_osinfo.mboxbsy = 0;
}