/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

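/*
 * Each lpfc_scsi_buf pairs the midlayer command state with the DMA-able
 * FCP_CMND, FCP_RSP, and buffer pointer list for one outstanding I/O.
 * Buffers are kept on phba->lpfc_scsi_buf_list and are reused for the
 * lifetime of the driver.
 */
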
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

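/*
 * Unmap whatever DMA mapping the completed command still holds (either
 * the scatter-gather list or the single mapping) and return the buffer
 * to the tail of the HBA's free list.  Callers are expected to hold the
 * host lock.
 */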
static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
	struct lpfc_hba *phba = psb->scsi_hba;

	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
						psb->pCmd->request_bufflen,
						psb->pCmd->sc_data_direction);
		}
	}

	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

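/*
 * Map the command's data buffer(s) for DMA and fill in the remaining
 * BDEs of the buffer pointer list.  Returns 0 on success and 1 when the
 * mapping fails or exceeds the configured segment limit, in which case
 * the caller asks the midlayer to retry.
 */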
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements in
		 * any single scsi command.  Just run through the seg_cnt and
		 * format the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}

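/*
 * Interpret the FCP_RSP payload for a command that completed with
 * IOSTAT_FCP_RSP_ERROR: validate the response length, copy any sense
 * data back to the midlayer, and fold residual under/overruns and read
 * check failures into the final host status.
 */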
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x\n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

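/*
 * Completion handler for FCP command IOCBs.  Translates the SLI status
 * into a midlayer result, logs failures, frees the scsi buffer, and
 * finally calls the midlayer's scsi_done.
 */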
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long iflag;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (pnode) {
			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
				cmd->result = ScsiResult(DID_BUS_BUSY,
					SAM_STAT_BUSY);
		} else {
			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
		}
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	cmd->host_scribble = NULL;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	cmd->scsi_done(cmd);
}

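/*
 * Build the FCP_CMND and IOCB for a midlayer command: LUN, CDB, task
 * attributes, the read/write/control IOCB opcode, and the routing
 * fields (rpi, class) taken from the target node.
 */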
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
			struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

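/*
 * Turn a scsi buffer into a task management IOCB (LUN reset, abort
 * task set, or target reset).  Returns 1 when the IOCB was prepared,
 * 0 when the target node is not logged in and mapped.
 */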
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	lpfc_cmd->rdata = rdata;

	switch (task_mgmt_cmd) {
	case FCP_LUN_RESET:
		/* Issue LUN Reset to TGT <num> LUN <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_ABORT_TASK_SET:
		/* Issue Abort Task Set to TGT <num> LUN <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_TARGET_RESET:
		/* Issue Target Reset to TGT <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0702 Issue Target Reset to TGT %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, ndlp->nlp_rpi,
				ndlp->nlp_flag);
		break;
	}

	return (1);
}

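/*
 * Issue a target reset through the high-priority polling path, then
 * abort anything still outstanding on the target since some targets
 * do not clean up their own exchanges.
 */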
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	int ret;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (!iocbqrsp)
		return FAILED;
	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[phba->sli.fcp_ring],
		     iocbq, SLI_IOCB_HIGH_PRIORITY,
		     iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    lpfc_cmd->pCmd->device->id,
			    lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

	/* Return response IOCB to free list. */
	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
	return ret;
}

static void
lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			    struct lpfc_iocbq *pIocbOut)
{
	unsigned long iflag;
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
				struct lpfc_iocbq *pIocbIn,
				struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *ml_cmd =
		((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

	lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
	ml_cmd->host_scribble = NULL;
}

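/*
 * scsi_host_template info() entry: model description plus PCI location,
 * and the port name when one is available.
 */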
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba    *phba = (struct lpfc_hba *) host->hostdata[0];
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

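/*
 * queuecommand entry point, called by the midlayer with the host lock
 * held.  Fails the command fast when the target node is gone, otherwise
 * takes a scsi buffer from the free list, maps the data buffer(s), and
 * hands the IOCB to the SLI layer.
 */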
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	int err = 0;

	/*
	 * The target pointer is guaranteed not to be NULL because the driver
	 * only clears the device->hostdata field in lpfc_slave_destroy.  This
	 * approach guarantees no further IO calls on this target.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_command;
	}

	/*
	 * A Fibre Channel target is present and functioning only when the node
	 * state is MAPPED.  Any other state is a failure.
	 */
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
			goto out_fail_command;
		}
		else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
			cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
			goto out_fail_command;
		}
		/*
		 * The device is most likely recovered and the driver
		 * needs a bit more time to finish.  Ask the midlayer
		 * to retry.
		 */
		goto out_host_busy;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL) {
		printk(KERN_WARNING "%s: No buffer available - list empty, "
		       "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;
	return 0;

 out_host_busy_free_buf:
	lpfc_free_scsi_buf(lpfc_cmd);
	cmnd->host_scribble = NULL;
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

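/*
 * Abort one command.  If it is still on the txq it is completed locally
 * with IOERR_SLI_ABORTED; if it has been handed to the firmware, an
 * ABORT_XRI (or CLOSE_XRI when the link is down) is issued and the
 * handler polls until the completion path clears host_scribble.
 */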
static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct lpfc_hba *phba =
			(struct lpfc_hba *)cmnd->device->host->hostdata[0];
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd, *icmd;
	unsigned long snum;
	unsigned int id, lun;
	unsigned int loop_count = 0;
	int ret = IOCB_SUCCESS;

	/*
	 * If the host_scribble data area is NULL, then the driver has already
	 * completed this command, but the midlayer did not see the completion
	 * before the eh fired.  Just return SUCCESS.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return SUCCESS;

	/* save these now since lpfc_cmd can be freed */
	id   = lpfc_cmd->pCmd->device->id;
	lun  = lpfc_cmd->pCmd->device->lun;
	snum = lpfc_cmd->pCmd->serial_number;

	/* If the command is still on the txq, just pull it off and
	 * complete it locally; no abort needs to go to the firmware. */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		cmd = &iocb->iocb;
		if (iocb->context1 != lpfc_cmd)
			continue;

		list_del_init(&iocb->list);
		pring->txq_cnt--;
		if (!iocb->iocb_cmpl) {
			list_add_tail(&iocb->list, lpfc_iocb_list);
		}
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
		}

		goto out;
	}

	list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
	if (abtsiocb == NULL)
		return FAILED;

	memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

	/*
	 * The scsi command was not in the txq.  Check the txcmplq and if it is
	 * found, send an abort to the FW.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != lpfc_cmd)
			continue;

		iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
		cmd = &iocb->iocb;
		icmd = &abtsiocb->iocb;
		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
		icmd->un.acxri.abortContextTag = cmd->ulpContext;
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

		icmd->ulpLe = 1;
		icmd->ulpClass = cmd->ulpClass;
		if (phba->hba_state >= LPFC_LINK_UP)
			icmd->ulpCommand = CMD_ABORT_XRI_CN;
		else
			icmd->ulpCommand = CMD_CLOSE_XRI_CN;

		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
								IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			ret = IOCB_ERROR;
			break;
		}

		/* Wait for abort to complete */
		while (cmnd->host_scribble) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(LPFC_ABORT_WAIT*HZ);
			spin_lock_irq(phba->host->host_lock);
			if (++loop_count
			    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
				break;
		}

		if (cmnd->host_scribble) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0748 abort handler timed "
					"out waiting for abort to "
					"complete. Data: "
					"x%x x%x x%x x%lx\n",
					phba->brd_no, ret, id, lun, snum);
			cmnd->host_scribble = NULL;
			iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
			ret = IOCB_ERROR;
		}

		break;
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI layer issued abort device "
			"Data: x%x x%x x%x x%lx\n",
			phba->brd_no, ret, id, lun, snum);

	return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}

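/*
 * The eh_* entry points below are thin wrappers that acquire the host
 * lock around the real handlers.
 */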
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	int rc;
	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_abort_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}

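/*
 * LUN reset handler.  Waits for an unmapped target to be rediscovered
 * (or nodev timeout), sends an FCP LUN reset, then aborts and drains
 * any I/O still outstanding against the LUN.
 */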
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret = FAILED;
	int cnt, loopcnt;

	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout( HZ/2);
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[psli->fcp_ring],
		     iocbq, 0, iocbqrsp, 60);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    cmnd->device->id, cmnd->device->lun, 0,
			    LPFC_CTX_LUN);

	loopcnt = 0;
	while((cnt = lpfc_sli_sum_iocb(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       cmnd->device->id, cmnd->device->lun,
				       LPFC_CTX_LUN))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
	}

	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, lpfc_cmd->pCmd->device->id,
			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
			lpfc_cmd->result);
	lpfc_free_scsi_buf(lpfc_cmd);
out:
	return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	int rc;
	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_lun_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}

/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	cmnd->device->id = midlayer_id;
	loopcnt = 0;
	while((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		/* flush all outstanding commands on the host */
		i = lpfc_sli_abort_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
				LPFC_CTX_HOST);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
		   phba->brd_no, cnt, i);
	}

	if (!err_count)
		ret = SUCCESS;

	lpfc_free_scsi_buf(lpfc_cmd);
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
out:
	return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	int rc;
	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_bus_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}

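/*
 * slave_alloc callback: resolve the scsi target id to a discovered
 * node, store the rport data in sdev->hostdata, and grow the shared
 * scsi buffer pool up to cfg_hba_queue_depth.
 */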
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match = 0;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;
	struct list_head *listp;
	struct list_head *node_list[6];

	/*
	 * Store the target pointer in the scsi_device hostdata pointer
	 * provided the driver has already discovered the target id.
	 */

	/* Search the nlp lists other than unmap_list for this target ID */
	node_list[0] = &phba->fc_npr_list;
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_prli_list;
	node_list[3] = &phba->fc_reglogin_list;
	node_list[4] = &phba->fc_adisc_list;
	node_list[5] = &phba->fc_plogi_list;

	for (i = 0; i < 6 && !match; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;
		list_for_each_entry(ndlp, listp, nlp_listp) {
			if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
	}

	if (!match)
		return -ENXIO;

	sdev->hostdata = ndlp->rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  Note
	 * that this list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = LPFC_CMD_PER_LUN;
	if (total >= phba->cfg_hba_queue_depth) {
		printk(KERN_WARNING "%s, At config limitation of "
		       "%d allocated scsi_bufs\n", __FUNCTION__, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_get_scsi_buf(phba);
		if (!scsi_buf) {
			printk(KERN_ERR "%s, failed to allocate "
			       "scsi_buf\n", __FUNCTION__);
			break;
		}

		spin_lock_irqsave(phba->host->host_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(phba->host->host_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};