/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Enterprise Fibre Channel Host Bus Adapters.                     *
 * Refer to the README file included with this package for         *
 * driver version and adapter support.                             *
 * Copyright (C) 2004 Emulex Corporation.                          *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of the GNU General Public License     *
 * as published by the Free Software Foundation; either version 2  *
 * of the License, or (at your option) any later version.          *
 *                                                                 *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
 * GNU General Public License for more details, a copy of which    *
 * can be found in the file COPYING included with this package.    *
 *******************************************************************/

/*
 * $Id: lpfc_scsi.c 1.37 2005/04/13 14:27:09EDT sf_support Exp $
 */

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
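
/*
 * Sets the FCP LUN field of the FCP_CMND payload.  The FCP LUN is
 * big-endian on the wire, so the swab16() below presumably places the
 * midlayer's flat LUN number in the first-level LUN bytes the target
 * expects.
 */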
static inline void
lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
{
	fcmd->fcpLunLsl = 0;
	fcmd->fcpLunMsl = swab16((uint16_t)lun);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}
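
/*
 * Layout of the DMA region backing each lpfc_scsi_buf, as implied by
 * the pointer arithmetic in lpfc_get_scsi_buf() above:
 *
 *   psb->data + 0                           struct fcp_cmnd
 *   psb->data + sizeof(fcp_cmnd)            struct fcp_rsp
 *   psb->data + sizeof(fcp_cmnd)
 *             + sizeof(fcp_rsp)             BPL: bde[0] -> fcp_cmnd,
 *                                           bde[1] -> fcp_rsp, and
 *                                           bde[2..] left for the data
 *                                           bdes built at I/O time.
 */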

static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
	struct lpfc_hba *phba = psb->scsi_hba;

	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					 psb->pCmd->request_bufflen,
					 psb->pCmd->sc_data_direction);
		}
	}

	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr = 0;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
					       scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;

	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}
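
/*
 * The bdeSize "+=" above relies on lpfc_get_scsi_buf() having seeded
 * the IOCB's BDL size with the two fixed bdes (FCP_CMND and FCP_RSP);
 * adding num_bde * sizeof(struct ulp_bde64) yields the total BPL size
 * including the data bdes just formatted.
 */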

static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x\n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
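
/*
 * ScsiResult() packs the command status word handed back to the
 * midlayer; assuming the usual definition in lpfc_scsi.h, the host
 * code (DID_*) occupies bits 16-23 and the SCSI status byte bits 0-7
 * of cmnd->result.
 */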

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long iflag;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (pnode) {
			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
				cmd->result = ScsiResult(DID_BUS_BUSY,
							 SAM_STAT_BUSY);
		} else {
			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
		}
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	cmd->host_scribble = NULL;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	cmd->scsi_done(cmd);
}

static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
			struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;

	lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
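
/*
 * For reads, ulpPU = PARM_READ_CHECK with fcpi_parm preloaded to the
 * expected transfer length arms the adapter's transfer-count check;
 * lpfc_handle_fcp_err() later treats a non-zero residual fcpi_parm on
 * an otherwise successful read as an "FCP Read Check Error".
 */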

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	lpfc_cmd->rdata = rdata;

	switch (task_mgmt_cmd) {
	case FCP_LUN_RESET:
		/* Issue LUN Reset to TGT <num> LUN <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_ABORT_TASK_SET:
		/* Issue Abort Task Set to TGT <num> LUN <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_TARGET_RESET:
		/* Issue Target Reset to TGT <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0702 Issue Target Reset to TGT %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, ndlp->nlp_rpi,
				ndlp->nlp_flag);
		break;
	}

	return (1);
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	int ret;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (!iocbqrsp)
		return FAILED;
	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[phba->sli.fcp_ring],
		     iocbq, SLI_IOCB_HIGH_PRIORITY,
		     iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    lpfc_cmd->pCmd->device->id,
			    lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

	/* Return response IOCB to free list. */
	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
	return ret;
}

static void
lpfc_scsi_cmd_iocb_cleanup(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			   struct lpfc_iocbq *pIocbOut)
{
	unsigned long iflag;
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
				struct lpfc_iocbq *pIocbIn,
				struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *ml_cmd =
		((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

	lpfc_scsi_cmd_iocb_cleanup(phba, pIocbIn, pIocbOut);
	ml_cmd->host_scribble = NULL;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	int err = 0;

	/*
	 * The target pointer is guaranteed not to be NULL because the driver
	 * only clears the device->hostdata field in lpfc_slave_destroy.  This
	 * approach guarantees no further IO calls on this target.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_command;
	}

	/*
	 * A Fibre Channel target is present and functioning only when the node
	 * state is MAPPED.  Any other state is a failure.
	 */
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
			goto out_fail_command;
		}
		/*
		 * The device is most likely recovered and the driver
		 * needs a bit more time to finish.  Ask the midlayer
		 * to retry.
		 */
		goto out_host_busy;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL) {
		printk(KERN_WARNING "%s: No buffer available - list empty, "
		       "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;
	return 0;

 out_host_busy_free_buf:
	lpfc_free_scsi_buf(lpfc_cmd);
	cmnd->host_scribble = NULL;
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
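
/*
 * Error-path summary for lpfc_queuecommand(): SCSI_MLQUEUE_HOST_BUSY
 * asks the midlayer to requeue and retry the command later, while
 * out_fail_command completes it immediately with DID_NO_CONNECT so
 * the midlayer gives up on the missing target.
 */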

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *)cmnd->device->host->hostdata[0];
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd, *icmd;
	unsigned long snum;
	unsigned int id, lun;
	unsigned int loop_count = 0;
	int ret = IOCB_SUCCESS;

	/*
	 * If the host_scribble data area is NULL, then the driver has already
	 * completed this command, but the midlayer did not see the completion
	 * before the eh fired.  Just return SUCCESS.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return SUCCESS;

	/* save these now since lpfc_cmd can be freed */
	id   = lpfc_cmd->pCmd->device->id;
	lun  = lpfc_cmd->pCmd->device->lun;
	snum = lpfc_cmd->pCmd->serial_number;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		cmd = &iocb->iocb;
		if (iocb->context1 != lpfc_cmd)
			continue;

		list_del_init(&iocb->list);
		pring->txq_cnt--;
		if (!iocb->iocb_cmpl) {
			list_add_tail(&iocb->list, lpfc_iocb_list);
		} else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
		}

		goto out;
	}

	list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
	if (abtsiocb == NULL)
		return FAILED;

	memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

	/*
	 * The scsi command was not in the txq.  Check the txcmplq and if it is
	 * found, send an abort to the FW.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != lpfc_cmd)
			continue;

		iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
		cmd = &iocb->iocb;
		icmd = &abtsiocb->iocb;
		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
		icmd->un.acxri.abortContextTag = cmd->ulpContext;
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

		icmd->ulpLe = 1;
		icmd->ulpClass = cmd->ulpClass;
		if (phba->hba_state >= LPFC_LINK_UP)
			icmd->ulpCommand = CMD_ABORT_XRI_CN;
		else
			icmd->ulpCommand = CMD_CLOSE_XRI_CN;

		if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
								IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			ret = IOCB_ERROR;
			break;
		}

		/* Wait for abort to complete */
		while (cmnd->host_scribble) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(LPFC_ABORT_WAIT*HZ);
			spin_lock_irq(phba->host->host_lock);
			if (++loop_count
			    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
				break;
		}

		if (cmnd->host_scribble) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0748 abort handler timed "
					"out waiting for abort to "
					"complete.  Data: "
					"x%x x%x x%x x%lx\n",
					phba->brd_no, ret, id, lun, snum);
			cmnd->host_scribble = NULL;
			iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
			ret = IOCB_ERROR;
		}

		break;
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI layer issued abort device "
			"Data: x%x x%x x%x x%lx\n",
			phba->brd_no, ret, id, lun, snum);

	return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}
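
/*
 * Abort strategy recap: a command still on the txq was never issued
 * to the adapter, so it is unlinked and completed locally with
 * IOERR_SLI_ABORTED.  A command on the txcmplq is owned by the
 * firmware, so an ABORT_XRI_CN (or CLOSE_XRI_CN while the link is
 * down) is issued and the handler polls cmnd->host_scribble, which
 * the aborted-completion routine clears, to see the abort finish.
 */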

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret = FAILED;
	int cnt, loopcnt;

	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout( HZ/2);
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[psli->fcp_ring],
		     iocbq, 0, iocbqrsp, 60);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    cmnd->device->id, cmnd->device->lun, 0,
			    LPFC_CTX_LUN);

	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
	}

	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, lpfc_cmd->pCmd->device->id,
			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
			lpfc_cmd->result);
	lpfc_free_scsi_buf(lpfc_cmd);
out:
	return ret;
}

/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	cmnd->device->id = midlayer_id;
	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		/* flush all outstanding commands on the host */
		i = lpfc_sli_abort_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
				LPFC_CTX_HOST);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
			phba->brd_no, cnt, i);
	}

	if (!err_count)
		ret = SUCCESS;

	lpfc_free_scsi_buf(lpfc_cmd);
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
out:
	return ret;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match = 0;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;
	struct list_head *listp;
	struct list_head *node_list[6];

	/*
	 * Store the target pointer in the scsi_device hostdata pointer provided
	 * the driver has already discovered the target id.
	 */

	/* Search the nlp lists other than unmap_list for this target ID */
	node_list[0] = &phba->fc_npr_list;
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_prli_list;
	node_list[3] = &phba->fc_reglogin_list;
	node_list[4] = &phba->fc_adisc_list;
	node_list[5] = &phba->fc_plogi_list;

	for (i = 0; i < 6 && !match; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;
		list_for_each_entry(ndlp, listp, nlp_listp) {
			if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
	}

	if (!match)
		return -ENXIO;

	sdev->hostdata = ndlp->rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  Note
	 * that this list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = LPFC_CMD_PER_LUN;
	if (total >= phba->cfg_hba_queue_depth) {
		printk(KERN_WARNING "%s, At config limitation of "
		       "%d allocated scsi_bufs\n", __FUNCTION__, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_get_scsi_buf(phba);
		if (!scsi_buf) {
			printk(KERN_ERR "%s, failed to allocate "
			       "scsi_buf\n", __FUNCTION__);
			break;
		}

		spin_lock_irqsave(phba->host->host_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(phba->host->host_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	return 0;
}
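
/*
 * The "+ 5" applied to dev_loss_tmo in lpfc_slave_configure() above
 * presumably pads the transport's device-loss timer past the driver's
 * own nodev timeout, so lpfc's nodev handling runs before the FC
 * transport tears the rport down.
 */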

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};