/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
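/*
 * Both wait values are in seconds.  The reset handlers below sleep
 * LPFC_RESET_WAIT * HZ jiffies per pass while polling for outstanding I/O
 * to flush after a device or bus reset.
 */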
/*
 * This function is called with no lock held when there is a resource
 * error in driver or in firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}
/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
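/*
 * Worker-thread side of the ramp-down event.  Each scsi_device's depth is
 * reduced by the fraction of recent commands that hit a resource error:
 *
 *   new_depth = depth - (depth * num_rsrc_err / (num_rsrc_err + num_cmd_success))
 *
 * e.g. a depth of 32 with one resource error per three successes becomes
 * 32 - 8 = 24.  If the computed reduction rounds to zero, the depth is
 * still lowered by one.
 */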
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
							  new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
							&psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
						sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}
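/*
 * Get/put pair for the preallocated lpfc_scsi_buf pool built up by
 * lpfc_new_scsi_buf().  Both helpers run under scsi_buf_list_lock; a NULL
 * return from lpfc_get_scsi_buf() means the pool is exhausted and the
 * caller (see lpfc_queuecommand()) backs off with a host-busy status.
 */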
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
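/*
 * Map the midlayer scatterlist for this command and convert every DMA
 * segment into a ulp_bde64 entry, starting at the third BPL slot (the first
 * two were reserved for FCP_CMND and FCP_RSP by lpfc_new_scsi_buf()).
 * Returns 0 on success, 1 if the mapping fails or exceeds cfg_sg_seg_cnt.
 */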
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements in
		 * any single scsi command.  Just run through the seg_cnt and
		 * format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}
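/*
 * Decode the FCP_RSP payload for a command that completed with
 * IOSTAT_FCP_RSP_ERROR: copy any sense data, validate the response length,
 * reconcile the residual counts reported by the target and the HBA, and
 * fold the outcome into cmnd->result.
 */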
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
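/*
 * IOCB completion handler for FCP commands issued by lpfc_queuecommand().
 * Translates the SLI status into a SCSI midlayer result, releases the DMA
 * mapping and the lpfc_scsi_buf, and drives the per-LUN queue-depth
 * ramp-up and queue-full heuristics.
 */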
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode != NULL &&
	   ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
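/*
 * Build a task management IOCB (for example FCP_TARGET_RESET) in the given
 * lpfc_scsi_buf.  Returns 1 when the IOCB was prepared, 0 when the target
 * node is not in a MAPPED state and the request cannot be issued.
 */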
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}
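/*
 * Issue a synchronous target reset to tgt_id and wait for the response
 * IOCB.  On IOCB_TIMEDOUT the default task-management completion above is
 * hooked in so the lpfc_scsi_buf is still freed when the late response
 * eventually arrives.
 */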
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
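/*
 * queuecommand entry point: validate the rport and node state, take an
 * lpfc_scsi_buf from the pool, map the data buffer, build the FCP_CMND and
 * IOCB, and hand the IOCB to the SLI layer.  Resource shortages are
 * reported as SCSI_MLQUEUE_HOST_BUSY so the midlayer will retry.
 */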
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}
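/*
 * eh_abort_handler: issue an ABORT_XRI (or CLOSE_XRI when the link is down)
 * for the outstanding IOCB, then wait up to 2 * devloss_tmo seconds for the
 * original command to complete.  SUCCESS is returned only once pCmd no
 * longer points at the command being aborted.
 */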
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
						 "0721 LUN Reset rport "
						 "failure: cnt x%x rdata x%p\n",
						 loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}

out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 cmd_status, cmd_result);
out:
	return ret;
}
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
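/*
 * slave_alloc grows the shared lpfc_scsi_buf pool as LUNs are discovered:
 * each new scsi_device asks for cfg_lun_queue_depth + 2 buffers (the +2
 * covers the error handlers), capped so that LPFC_DISC_IOCB_BUFF_COUNT
 * exchanges always remain available for discovery traffic.
 */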
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}
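/*
 * SCSI host templates.  lpfc_template and lpfc_vport_template are identical
 * except for the sysfs attribute group exported through shost_attrs
 * (lpfc_hba_attrs vs. lpfc_vport_attrs); the vport variant is presumably
 * the one registered for NPIV virtual ports.
 */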
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};