/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This function is called with no lock held when there is a resource
 * error in the driver or in the firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);
        atomic_inc(&phba->num_rsrc_err);
        phba->last_rsrc_error_time = jiffies;

        if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        phba->last_ramp_down_time = jiffies;

        spin_unlock_irqrestore(&phba->hbalock, flags);

        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        if ((phba->pport->work_port_events &
                WORKER_RAMP_DOWN_QUEUE) == 0) {
                phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
        }
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

        spin_lock_irqsave(&phba->hbalock, flags);
        if (phba->work_wait)
                wake_up(phba->work_wait);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        return;
}

/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
                        struct scsi_device *sdev)
{
        unsigned long flags;
        struct lpfc_hba *phba = vport->phba;

        atomic_inc(&phba->num_cmd_success);

        if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
                return;
        spin_lock_irqsave(&phba->hbalock, flags);
        if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
            ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        phba->last_ramp_up_time = jiffies;
        spin_unlock_irqrestore(&phba->hbalock, flags);

        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        if ((phba->pport->work_port_events &
                WORKER_RAMP_UP_QUEUE) == 0) {
                phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
        }
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

        spin_lock_irqsave(&phba->hbalock, flags);
        if (phba->work_wait)
                wake_up(phba->work_wait);
        spin_unlock_irqrestore(&phba->hbalock, flags);
}

void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        unsigned long new_queue_depth;
        unsigned long num_rsrc_err, num_cmd_success;
        int i;

        num_rsrc_err = atomic_read(&phba->num_rsrc_err);
        num_cmd_success = atomic_read(&phba->num_cmd_success);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                new_queue_depth =
                                        sdev->queue_depth * num_rsrc_err /
                                        (num_rsrc_err + num_cmd_success);
                                if (!new_queue_depth)
                                        new_queue_depth = sdev->queue_depth - 1;
                                else
                                        new_queue_depth = sdev->queue_depth -
                                                                new_queue_depth;
                                if (sdev->ordered_tags)
                                        scsi_adjust_queue_depth(sdev,
                                                        MSG_ORDERED_TAG,
                                                        new_queue_depth);
                                else
                                        scsi_adjust_queue_depth(sdev,
                                                        MSG_SIMPLE_TAG,
                                                        new_queue_depth);
                        }
                }
        lpfc_destroy_vport_work_array(vports);
        atomic_set(&phba->num_rsrc_err, 0);
        atomic_set(&phba->num_cmd_success, 0);
}

void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                if (sdev->ordered_tags)
                                        scsi_adjust_queue_depth(sdev,
                                                        MSG_ORDERED_TAG,
                                                        sdev->queue_depth+1);
                                else
                                        scsi_adjust_queue_depth(sdev,
                                                        MSG_SIMPLE_TAG,
                                                        sdev->queue_depth+1);
                        }
                }
        lpfc_destroy_vport_work_array(vports);
        atomic_set(&phba->num_rsrc_err, 0);
        atomic_set(&phba->num_cmd_success, 0);
}

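/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * lpfc_ramp_down_queue_handler() above shrinks each LUN queue depth in
 * proportion to the observed resource-error ratio.  With queue_depth = 30,
 * num_rsrc_err = 10 and num_cmd_success = 90, the reduction is
 * 30 * 10 / (10 + 90) = 3, giving a new depth of 27; when the computed
 * reduction rounds down to zero the depth still drops by one.  The
 * hypothetical helper below restates that arithmetic in isolation and is
 * never compiled.
 */
#if 0
static unsigned long
lpfc_example_ramped_down_depth(unsigned long depth, unsigned long num_rsrc_err,
                               unsigned long num_cmd_success)
{
        unsigned long delta = depth * num_rsrc_err /
                              (num_rsrc_err + num_cmd_success);

        /* Always back off by at least one, mirroring the handler above. */
        return delta ? depth - delta : depth - 1;
}
#endif
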
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys;
        uint16_t iotag;

        psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));

        /*
         * Get memory from the pci pool to map the virt space to pci bus space
         * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
         * struct fcp_rsp and the number of bde's necessary to support the
         * sg_tablesize.
         */
        psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
                                   &psb->dma_handle);
        if (!psb->data) {
                kfree(psb);
                return NULL;
        }

        /* Initialize virtual ptrs to dma_buf region. */
        memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

        /* Allocate iotag for psb->cur_iocbq. */
        iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
        if (iotag == 0) {
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                              psb->data, psb->dma_handle);
                kfree(psb);
                return NULL;
        }
        psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

        psb->fcp_cmnd = psb->data;
        psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
        psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                                                        sizeof(struct fcp_rsp);

        /* Initialize local short-hand pointers. */
        bpl = psb->fcp_bpl;
        pdma_phys = psb->dma_handle;

        /*
         * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
         * list bdes.  Initialize the first two and leave the rest for
         * queuecommand.
         */
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys += sizeof (struct fcp_cmnd);
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        /*
         * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
         * initialize it with all known data now.
         */
        pdma_phys += (sizeof (struct fcp_rsp));
        iocb = &psb->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
        iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
        iocb->ulpBdeCount = 1;
        iocb->ulpClass = CLASS3;

        return psb;
}

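/*
 * Editor's note (layout summary, not part of the original driver): the
 * single DMA buffer carved up by lpfc_new_scsi_buf() above is laid out as
 *
 *     psb->data:  [ struct fcp_cmnd ][ struct fcp_rsp ][ BPL entries ... ]
 *
 * The first two BPL entries always describe the fcp_cmnd and fcp_rsp
 * regions; the remaining entries are filled in per I/O by
 * lpfc_scsi_prep_dma_buf().  The IOCB's BDL then points at the BPL itself,
 * which is why pdma_phys is advanced past fcp_cmnd and fcp_rsp before it is
 * written into iocb->un.fcpi64.bdl above.
 */
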
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd) {
                lpfc_cmd->seg_cnt = 0;
                lpfc_cmd->nonsg_phys = 0;
        }
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        psb->pCmd = NULL;
        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr;
        uint32_t i, num_bde = 0;
        int nseg, datadir = scsi_cmnd->sc_data_direction;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_sg_count(scsi_cmnd)) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */

                nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
                                  scsi_sg_count(scsi_cmnd), datadir);
                if (unlikely(!nseg))
                        return 1;

                lpfc_cmd->seg_cnt = nseg;
                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg.  Config %d, seg_cnt %d",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment
                 * count during probe that limits the number of sg elements in
                 * any single scsi command.  Just run through the seg_cnt and
                 * format the bde's.
                 */
                scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                        bpl->tus.f.bdeSize = sg_dma_len(sgel);
                        if (datadir == DMA_TO_DEVICE)
                                bpl->tus.f.bdeFlags = 0;
                        else
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
                        num_bde++;
                }
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
         * reinitialized since all iocb memory resources are used many times
         * for transmit, receive, and continuation bpl's.
         */
        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb_cmd->un.fcpi64.bdl.bdeSize +=
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
        fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
        return 0;
}

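/*
 * Editor's note (worked example, not part of the original driver): each
 * struct ulp_bde64 is three 32-bit words (addrLow, addrHigh and the tus
 * word), i.e. 12 bytes, so a command mapped into, say, 4 scatter-gather
 * segments ends up with bdl.bdeSize = (2 + 4) * sizeof(struct ulp_bde64) =
 * 72: two fixed entries for fcp_cmnd/fcp_rsp plus one per data segment,
 * matching the two assignments to bdeSize at the end of
 * lpfc_scsi_prep_dma_buf() above.  The hypothetical helper below restates
 * that arithmetic and is never compiled.
 */
#if 0
static uint32_t
lpfc_example_bdl_size(uint32_t num_data_bdes)
{
        /* two fixed BDEs (fcp_cmnd, fcp_rsp) plus one per data segment */
        return (2 + num_data_bdes) * sizeof(struct ulp_bde64);
}
#endif
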
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
        /*
         * There are only two special cases to consider.  (1) the scsi command
         * requested scatter-gather usage or (2) the scsi command allocated
         * a request buffer, but did not request use_sg.  There is a third
         * case, but it does not require resource deallocation.
         */
        if (psb->seg_cnt > 0)
                scsi_dma_unmap(psb->pCmd);
}

static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_iocbq *rsp_iocb)
{
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t *lp;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;
        uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

        /*
         *  If this is a task management command, there is no
         *  scsi packet associated with this lpfc_cmd.  The driver
         *  consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                if (resp_info & RSP_LEN_VALID)
                        rsplen = be32_to_cpu(fcprsp->rspRspLen);
                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }
        lp = (uint32_t *)cmnd->sense_buffer;

        if (!scsi_status && (resp_info & RESID_UNDER))
                logit = LOG_FCP;

        lpfc_printf_vlog(vport, KERN_WARNING, logit,
                         "0730 FCP command x%x failed: x%x SNS x%x x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
                         cmnd->cmnd[0], scsi_status,
                         be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
                         be32_to_cpu(fcprsp->rspResId),
                         be32_to_cpu(fcprsp->rspSnsLen),
                         be32_to_cpu(fcprsp->rspRspLen),
                         fcprsp->rspInfo3);

        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
                    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        scsi_set_resid(cmnd, 0);
        if (resp_info & RESID_UNDER) {
                scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                 "0716 FCP Read Underrun, expected %d, "
                                 "residual %d Data: x%x x%x x%x\n",
                                 be32_to_cpu(fcpcmd->fcpDl),
                                 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
                                 cmnd->underflow);

                /*
                 * If there is an under run, check if the under run reported by
                 * the storage array is the same as the under run reported by
                 * the HBA.  If it is not the same, there is a dropped frame.
                 */
                if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
                    fcpi_parm &&
                    (scsi_get_resid(cmnd) != fcpi_parm)) {
                        lpfc_printf_vlog(vport, KERN_WARNING,
                                         LOG_FCP | LOG_FCP_ERROR,
                                         "0735 FCP Read Check Error "
                                         "and Underrun Data: x%x x%x x%x x%x\n",
                                         be32_to_cpu(fcpcmd->fcpDl),
                                         scsi_get_resid(cmnd), fcpi_parm,
                                         cmnd->cmnd[0]);
                        scsi_set_resid(cmnd, scsi_bufflen(cmnd));
                        host_status = DID_ERROR;
                }
                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transferred for this command.  Provided a sense condition
                 * is not present, make sure the actual amount transferred is
                 * at least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
                     < cmnd->underflow)) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                         "0717 FCP command x%x residual "
                                         "underrun converted to error "
                                         "Data: x%x x%x x%x\n",
                                         cmnd->cmnd[0], scsi_bufflen(cmnd),
                                         scsi_get_resid(cmnd), cmnd->underflow);
                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0720 FCP command x%x residual overrun error. "
                                 "Data: x%x x%x \n", cmnd->cmnd[0],
                                 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
                host_status = DID_ERROR;

        /*
         * Check SLI validation that all the transfer was actually done
         * (fcpi_parm should be zero). Apply check only to reads.
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
                                 "0734 FCP Read Check Error Data: "
                                 "x%x x%x x%x x%x\n",
                                 be32_to_cpu(fcpcmd->fcpDl),
                                 be32_to_cpu(fcprsp->rspResId),
                                 fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
                scsi_set_resid(cmnd, scsi_bufflen(cmnd));
        }

 out:
        cmnd->result = ScsiResult(host_status, scsi_status);
}

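/*
 * Editor's note (clarification, not part of the original driver): in the
 * sense-copy path above, &fcprsp->rspInfo0 + rsplen skips any FCP response
 * information that precedes the sense bytes in the payload; with a valid
 * 8-byte response block the sense data therefore starts 8 bytes past
 * rspInfo0.  The later RESID_UNDER branch cross-checks the target-reported
 * residual against fcpi_parm, the byte count the HBA actually received, and
 * treats a mismatch on a read as a dropped frame (DID_ERROR).
 */
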
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_vport      *vport = pIocbIn->vport;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        int result;
        struct scsi_device *sdev, *tmp_sdev;
        int depth = 0;

        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

        if (lpfc_cmd->status) {
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;

                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0729 FCP cmd x%x failed <%d/%d> "
                                 "status: x%x result: x%x Data: x%x x%x\n",
                                 cmd->cmnd[0],
                                 cmd->device ? cmd->device->id : 0xffff,
                                 cmd->device ? cmd->device->lun : 0xffff,
                                 lpfc_cmd->status, lpfc_cmd->result,
                                 pIocbOut->iocb.ulpContext,
                                 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
                case IOSTAT_LOCAL_REJECT:
                        if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
                            lpfc_cmd->result == IOERR_NO_RESOURCES ||
                            lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
                                cmd->result = ScsiResult(DID_REQUEUE, 0);
                                break;
                        } /* else: fall through */
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
                }

                if ((pnode == NULL)
                    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
                        cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
        } else {
                cmd->result = ScsiResult(DID_OK, 0);
        }

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                 "0710 Iodone <%d/%d> cmd %p, error "
                                 "x%x SNS x%x x%x Data: x%x x%x\n",
                                 cmd->device->id, cmd->device->lun, cmd,
                                 cmd->result, *lp, *(lp + 3), cmd->retries,
                                 scsi_get_resid(cmd));
        }

        result = cmd->result;
        sdev = cmd->device;
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
        cmd->scsi_done(cmd);

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return;
        }

        if (!result)
                lpfc_rampup_queue_depth(vport, sdev);

        if (!result && pnode != NULL &&
           ((jiffies - pnode->last_ramp_up_time) >
                LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
           ((jiffies - pnode->last_q_full_time) >
                LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
           (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
                                if (tmp_sdev->id != sdev->id)
                                        continue;
                                if (tmp_sdev->ordered_tags)
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_ORDERED_TAG,
                                                tmp_sdev->queue_depth+1);
                                else
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_SIMPLE_TAG,
                                                tmp_sdev->queue_depth+1);

                                pnode->last_ramp_up_time = jiffies;
                        }
                }
        }

        /*
         * Check for queue full.  If the lun is reporting queue full, then
         * back off the lun queue depth to prevent target overloads.
         */
        if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
                pnode->last_q_full_time = jiffies;

                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (tmp_sdev->id != sdev->id)
                                continue;
                        depth = scsi_track_queue_full(tmp_sdev,
                                        tmp_sdev->queue_depth - 1);
                }
                /*
                 * The queue depth cannot be lowered any more.
                 * Modify the returned error code to store
                 * the final depth value set by
                 * scsi_track_queue_full.
                 */
                if (depth == -1)
                        depth = sdev->host->cmd_per_lun;

                if (depth) {
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                         "0711 detected queue full - lun queue "
                                         "depth adjusted to %d.\n", depth);
                }
        }

        lpfc_release_scsi_buf(phba, lpfc_cmd);
}

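/*
 * Editor's note (clarification, not part of the original driver): in the
 * queue-full handling above, scsi_track_queue_full() returns the new queue
 * depth it applied, 0 when nothing changed, or -1 once the device has been
 * dropped back to untagged operation and the depth cannot be lowered any
 * further; the depth == -1 test therefore substitutes the host's
 * cmd_per_lun value so that the "0711" message logs a sensible final depth.
 */
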
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_nodelist *pnode)
{
        struct lpfc_hba *phba = vport->phba;
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
        /* clear task management bits */
        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                        &lpfc_cmd->fcp_cmnd->fcp_lun);

        memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

        if (scsi_cmnd->device->tagged_supported) {
                switch (scsi_cmnd->tag) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = ORDERED_Q;
                        break;
                default:
                        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
                        break;
                }
        } else
                fcp_cmnd->fcpCntl1 = 0;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        if (scsi_sg_count(scsi_cmnd)) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else {
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }

        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1  = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
        piocbq->vport = vport;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             unsigned int lun,
                             uint8_t task_mgmt_cmd)
{
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;

        if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
                return 0;
        }

        piocbq = &(lpfc_cmd->cur_iocbq);
        piocbq->vport = vport;

        piocb = &piocbq->iocb;

        fcp_cmnd = lpfc_cmd->fcp_cmnd;
        int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

        piocb->ulpCommand = CMD_FCP_ICMND64_CR;

        piocb->ulpContext = ndlp->nlp_rpi;
        if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
                piocb->ulpFCP2Rcvy = 1;
        }
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

        /* ulpTimeout is only one byte */
        if (lpfc_cmd->timeout > 0xff) {
                /*
                 * Do not timeout the command at the firmware level.
                 * The driver will provide the timeout mechanism.
                 */
                piocb->ulpTimeout = 0;
        } else {
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }

        return 1;
}

static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) cmdiocbq->context1;
        if (lpfc_cmd)
                lpfc_release_scsi_buf(phba, lpfc_cmd);
        return;
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
                    unsigned tgt_id, unsigned int lun,
                    struct lpfc_rport_data *rdata)
{
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
        int ret;

        if (!rdata->pnode)
                return FAILED;

        lpfc_cmd->rdata = rdata;
        ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
                                           FCP_TARGET_RESET);
        if (!ret)
                return FAILED;

        iocbq = &lpfc_cmd->cur_iocbq;
        iocbqrsp = lpfc_sli_get_iocbq(phba);

        if (!iocbqrsp)
                return FAILED;

        /* Issue Target Reset to TGT <num> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
                         tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
                if (ret == IOCB_TIMEDOUT)
                        iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
                lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        }

        lpfc_sli_release_iocbq(phba, iocbqrsp);
        return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        int len;
        static char lpfcinfobuf[384];

        memset(lpfcinfobuf, 0, 384);
        if (phba && phba->pcidev){
                strncpy(lpfcinfobuf, phba->ModelDesc, 256);
                len = strlen(lpfcinfobuf);
                snprintf(lpfcinfobuf + len,
                        384-len,
                        " on PCI bus %02x device %02x irq %d",
                        phba->pcidev->bus->number,
                        phba->pcidev->devfn,
                        phba->pcidev->irq);
                len = strlen(lpfcinfobuf);
                if (phba->Port[0]) {
                        snprintf(lpfcinfobuf + len,
                                 384-len,
                                 " port %s",
                                 phba->Port);
                }
        }
        return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
        unsigned long  poll_tmo_expires =
                (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

        if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
                mod_timer(&phba->fcp_poll_timer,
                                poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
        lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring (phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }
}

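/*
 * Editor's note (clarification, not part of the original driver): when the
 * FCP ring runs in polled mode (ENABLE_FCP_RING_POLLING), completions are
 * reaped either inline from lpfc_queuecommand() or from this timer.  The
 * timer is only re-armed while txcmplq_cnt shows work outstanding, and it
 * fires cfg_poll_tmo milliseconds later; for example, with cfg_poll_tmo set
 * to 10 the expiry computed above is jiffies + msecs_to_jiffies(10).
 */
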
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_sli   *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        int err;

        err = fc_remote_port_chkready(rport);
        if (err) {
                cmnd->result = err;
                goto out_fail_command;
        }

        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        if (!ndlp) {
                cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
                goto out_fail_command;
        }
        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL) {
                lpfc_adjust_queue_depth(phba);

                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                 "0707 driver's buffer pool is empty, "
                                 "IO busied\n");
                goto out_host_busy;
        }

        /*
         * Store the midlayer's command structure for the completion phase
         * and complete the command initialization.
         */
        lpfc_cmd->pCmd  = cmnd;
        lpfc_cmd->rdata = rdata;
        lpfc_cmd->timeout = 0;
        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
        cmnd->scsi_done = done;

        err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        if (err)
                goto out_host_busy_free_buf;

        lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

        err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err)
                goto out_host_busy_free_buf;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;

 out_host_busy_free_buf:
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
        lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
        return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
        done(cmnd);
        return 0;
}

static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

        spin_lock_irq(shost->host_lock);
        while (rport->port_state == FC_PORTSTATE_BLOCKED) {
                spin_unlock_irq(shost->host_lock);
                msleep(1000);
                spin_lock_irq(shost->host_lock);
        }
        spin_unlock_irq(shost->host_lock);
        return;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
        IOCB_t *cmd, *icmd;
        unsigned int loop_count = 0;
        int ret = SUCCESS;

        lpfc_block_error_handler(cmnd);
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        BUG_ON(!lpfc_cmd);

        /*
         * If pCmd field of the corresponding lpfc_scsi_buf structure
         * points to a different SCSI command, then the driver has
         * already completed this command, but the midlayer did not
         * see the completion before the eh fired.  Just return
         * SUCCESS.
         */
        iocb = &lpfc_cmd->cur_iocbq;
        if (lpfc_cmd->pCmd != cmnd)
                goto out;

        BUG_ON(iocb->context1 != lpfc_cmd);

        abtsiocb = lpfc_sli_get_iocbq(phba);
        if (abtsiocb == NULL) {
                ret = FAILED;
                goto out;
        }

        /*
         * The scsi command can not be in txq and it is in flight because the
         * pCmd is still pointing at the SCSI command we have to abort. There
         * is no need to search the txcmplq. Just send an abort to the FW.
         */
        cmd = &iocb->iocb;
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
        icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
        if (lpfc_is_link_up(phba))
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
        else
                icmd->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        abtsiocb->vport = vport;
        if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                lpfc_sli_poll_fcp_ring (phba);

        /* Wait for abort to complete */
        while (lpfc_cmd->pCmd == cmnd)
        {
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_sli_poll_fcp_ring (phba);

                schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
                if (++loop_count
                    > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
                        break;
        }

        if (lpfc_cmd->pCmd == cmnd) {
                ret = FAILED;
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0748 abort handler timed out waiting "
                                 "for abort to complete: ret %#x, ID %d, "
                                 "LUN %d, snum %#lx\n",
                                 ret, cmnd->device->id, cmnd->device->lun,
                                 cmnd->serial_number);
        }

 out:
        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                         "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
                         "LUN %d snum %#lx\n", ret, cmnd->device->id,
                         cmnd->device->lun, cmnd->serial_number);
        return ret;
}

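/*
 * Editor's note (worked example, not part of the original driver): the wait
 * loop above sleeps LPFC_ABORT_WAIT (2) seconds per pass and gives up after
 * (2 * cfg_devloss_tmo) / LPFC_ABORT_WAIT passes, i.e. roughly twice the
 * devloss timeout.  With a devloss timeout of 30 seconds that is 30
 * iterations, about 60 seconds, before the "0748" timeout message is logged
 * and FAILED is returned to the midlayer.
 */
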
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        uint32_t cmd_result = 0, cmd_status = 0;
        int ret = FAILED;
        int iocb_status = IOCB_SUCCESS;
        int cnt, loopcnt;

        lpfc_block_error_handler(cmnd);
        loopcnt = 0;
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or devloss timeout expires.
         */
        while (1) {
                if (!pnode)
                        goto out;

                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        loopcnt++;
                        rdata = cmnd->device->hostdata;
                        if (!rdata ||
                                (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
                                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                                 "0721 LUN Reset rport "
                                                 "failure: cnt x%x rdata x%p\n",
                                                 loopcnt, rdata);
                                goto out;
                        }
                        pnode = rdata->pnode;
                        if (!pnode)
                                goto out;
                }
                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        break;
        }

        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;

        lpfc_cmd->timeout = 60;
        lpfc_cmd->rdata = rdata;

        ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
                                           FCP_TARGET_RESET);
        if (!ret)
                goto out_free_scsi_buf;

        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        iocbqrsp = lpfc_sli_get_iocbq(phba);
        if (iocbqrsp == NULL)
                goto out_free_scsi_buf;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0703 Issue target reset to TGT %d LUN %d "
                         "rpi x%x nlp_flag x%x\n", cmnd->device->id,
                         cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
        iocb_status = lpfc_sli_issue_iocb_wait(phba,
                                &phba->sli.ring[phba->sli.fcp_ring],
                                iocbq, iocbqrsp, lpfc_cmd->timeout);

        if (iocb_status == IOCB_TIMEDOUT)
                iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

        if (iocb_status == IOCB_SUCCESS)
                ret = SUCCESS;
        else
                ret = iocb_status;

        cmd_result = iocbqrsp->iocb.un.ulpWord[4];
        cmd_status = iocbqrsp->iocb.ulpStatus;

        lpfc_sli_release_iocbq(phba, iocbqrsp);

        /*
         * All outstanding txcmplq I/Os should have been aborted by the device.
         * Unfortunately, some targets do not abide by this forcing the driver
         * to double check.
         */
        cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
                                LPFC_CTX_LUN);
        if (cnt)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    cmnd->device->id, cmnd->device->lun,
                                    LPFC_CTX_LUN);
        loopcnt = 0;
        while (cnt) {
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

                if (++loopcnt
                    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
                        break;

                cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
                                        cmnd->device->lun, LPFC_CTX_LUN);
        }

        if (cnt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0719 device reset I/O flush failure: "
                                 "cnt x%x\n", cnt);
                ret = FAILED;
        }

out_free_scsi_buf:
        if (iocb_status != IOCB_TIMEDOUT) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
        }
        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                         "0713 SCSI layer issued device reset (%d, %d) "
                         "return x%x status x%x result x%x\n",
                         cmnd->device->id, cmnd->device->lun, ret,
                         cmd_status, cmd_result);
out:
        return ret;
}

static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
        int cnt, loopcnt;
        struct lpfc_scsi_buf * lpfc_cmd;

        lpfc_block_error_handler(cmnd);

        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                return FAILED;

        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;

        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        for (i = 0; i < LPFC_MAX_TARGET; i++) {
                /* Search for mapped node by target ID */
                match = 0;
                spin_lock_irq(shost->host_lock);
                list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
                            i == ndlp->nlp_sid &&
                            ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                spin_unlock_irq(shost->host_lock);
                if (!match)
                        continue;

                ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
                                          cmnd->device->lun,
                                          ndlp->rport->dd_data);
                if (ret != SUCCESS) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                         "0700 Bus Reset on target %d failed\n",
                                         i);
                        err_count++;
                        break;
                }
        }

        if (ret != IOCB_TIMEDOUT)
                lpfc_release_scsi_buf(phba, lpfc_cmd);

        if (err_count == 0)
                ret = SUCCESS;
        else
                ret = FAILED;

        /*
         * All outstanding txcmplq I/Os should have been aborted by
         * the targets.  Unfortunately, some targets do not abide by
         * this forcing the driver to double check.
         */
        cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
        if (cnt)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    0, 0, LPFC_CTX_HOST);
        loopcnt = 0;
        while (cnt) {
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

                if (++loopcnt
                    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
                        break;

                cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
        }

        if (cnt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0715 Bus Reset I/O flush failure: "
                                 "cnt x%x left x%x\n", cnt, i);
                ret = FAILED;
        }

        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                         "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
        return ret;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;

        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        sdev->hostdata = rport->dd_data;

        /*
         * Populate the cmds_per_lun count scsi_bufs into this host's globally
         * available list of scsi buffers.  Don't allocate more than the
         * HBA limit conveyed to the midlayer via the host structure.  The
         * formula accounts for the lun_queue_depth + error handlers + 1
         * extra.  This list of scsi bufs exists for the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = vport->cfg_lun_queue_depth + 2;

        /* Allow some exchanges to be available always to complete discovery */
        if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0704 At limitation of %d preallocated "
                                 "command buffers\n", total);
                return 0;
        /* Allow some exchanges to be available always to complete discovery */
        } else if (total + num_to_alloc >
                phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0705 Allocation request of %d "
                                 "command buffers will exceed max of %d.  "
                                 "Reducing allocation request to %d.\n",
                                 num_to_alloc, phba->cfg_hba_queue_depth,
                                 (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_new_scsi_buf(vport);
                if (!scsi_buf) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                         "0706 Failed to allocate "
                                         "command buffer\n");
                        break;
                }

                spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
        }
        return 0;
}

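/*
 * Editor's note (worked example, not part of the original driver): the
 * num_to_alloc formula above is cfg_lun_queue_depth + 2 (the "+ 2" covers
 * the error handlers plus one spare buffer mentioned in the comment).  With
 * a LUN queue depth of 30 each new device asks for 32 scsi_bufs, and the
 * request is trimmed whenever total + 32 would pass
 * cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT, so some exchanges always
 * remain free for discovery traffic.
 */
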
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = vport->cfg_devloss_tmo;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}

struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_device_reset_handler,
        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_hba_attrs,
        .max_sectors            = 0xFFFF,
};

struct scsi_host_template lpfc_vport_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_device_reset_handler,
        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
};