/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
/*
 * This function is called with no lock held when there is a resource
 * error in driver or in firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}
/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
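
/*
 * Worker-thread handler for the WORKER_RAMP_DOWN_QUEUE event.  Walks every
 * vport's scsi devices and trims each queue depth in proportion to the
 * ratio of resource errors to successful completions seen since the last
 * adjustment, then clears both counters.
 */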
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth =
						sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
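
/*
 * Worker-thread handler for the WORKER_RAMP_UP_QUEUE event.  Bumps the
 * queue depth of every scsi device on every vport by one and resets the
 * ramp statistics.
 */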
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}
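
/*
 * Remove one pre-allocated scsi buffer from the head of the HBA's
 * lpfc_scsi_buf_list, under scsi_buf_list_lock.  Returns NULL if the
 * pool is currently empty.
 */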
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
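
/*
 * Return a scsi buffer to the tail of the HBA's free list once the I/O
 * that used it has completed.
 */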
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
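
/*
 * DMA-map the scatter-gather list of a SCSI command and describe each
 * mapped segment with a BDE in the buffer's BPL.  Returns 0 on success,
 * non-zero if the mapping fails or exceeds the configured segment limit.
 */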
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
	return 0;
}
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}
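
/*
 * Translate an IOSTAT_FCP_RSP_ERROR completion into a SCSI midlayer
 * result: copy any sense data, validate the response length, and convert
 * underrun/overrun and read-check conditions into host status codes.
 */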
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
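
/*
 * IOCB completion handler for FCP commands.  Maps the SLI completion
 * status onto a SCSI result, calls scsi_done, and applies the queue-depth
 * ramp-up / queue-full heuristics before releasing the scsi buffer.
 */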
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
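
/*
 * Build the FCP_CMND payload and the FCP IOCB (read, write, or control)
 * for a midlayer SCSI command against the given node.
 */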
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
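
/*
 * Build an FCP task management IOCB (e.g. FCP_TARGET_RESET) for the given
 * LUN.  Returns 1 if the IOCB was prepared, 0 if the node is not usable.
 */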
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}
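
/*
 * Default completion handler installed on a timed-out task management
 * IOCB; it simply returns the associated scsi buffer to the free pool.
 */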
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}
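
/*
 * Issue a synchronous target reset task management command to tgt_id and
 * wait for it to complete.  Returns SUCCESS on completion, otherwise the
 * iocb error status is propagated to the caller.
 */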
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
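
/*
 * FCP ring polling support: when polled completion is enabled the timer
 * below is re-armed for cfg_poll_tmo milliseconds whenever commands are
 * still outstanding on the FCP ring.
 */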
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
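
/*
 * scsi_host_template queuecommand entry point.  Checks the rport and node
 * state, grabs a pre-allocated scsi buffer, maps the data, and issues the
 * FCP IOCB on the FCP ring; busies the host if resources are exhausted.
 */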
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
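
/*
 * Hold off SCSI error handling while the FC transport still has the
 * remote port in the BLOCKED state.
 */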
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}
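
/*
 * scsi_host_template eh_abort_handler entry point.  Sends an ABTS (or a
 * close if the link is down) for the outstanding IOCB and waits for the
 * firmware to return the aborted command.
 */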
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd)
	{
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring (phba);

		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		if (++loop_count
		    > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);

	return ret;
}
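
/*
 * scsi_host_template eh_device_reset_handler entry point.  Sends an FCP
 * target reset to the device's node and then flushes any I/O still
 * outstanding on that LUN.
 */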
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
						 "0721 LUN Reset rport "
						 "failure: cnt x%x rdata x%p\n",
						 loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_LUN);
	loopcnt = 0;
	while(cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}

out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 cmd_status, cmd_result);
out:
	return ret;
}
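
/*
 * scsi_host_template eh_bus_reset_handler entry point.  Issues a target
 * reset to every mapped node on the vport and then flushes all I/O still
 * outstanding on the host.
 */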
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf * lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return ret;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while(cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
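
/*
 * scsi_host_template slave_alloc entry point.  Binds the scsi_device to
 * its rport and grows the global scsi buffer pool by roughly one LUN
 * queue depth, bounded by the HBA queue depth.
 */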
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}
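
/*
 * scsi_host_template slave_configure entry point.  Sets the tagged queue
 * depth, the transport dev_loss_tmo, and kicks the poll timer when FCP
 * ring polling is enabled.
 */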
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};