/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
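/*
 * Quiesce helpers: take the hba_can_block semaphore and ask the midlayer to
 * stop queueing new commands while an error handler runs; unblock reverses it.
 */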
void
lpfc_block_requests(struct lpfc_hba * phba)
{
        down(&phba->hba_can_block);
        scsi_block_requests(phba->host);
}

void
lpfc_unblock_requests(struct lpfc_hba * phba)
{
        scsi_unblock_requests(phba->host);
        up(&phba->hba_can_block);
}
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys;
        uint16_t iotag;

        psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));

        /*
         * Get memory from the pci pool to map the virt space to pci bus space
         * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
         * struct fcp_rsp and the number of bde's necessary to support the
         * sg_tablesize.
         */
        psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
                                   &psb->dma_handle);
        if (!psb->data) {
                kfree(psb);
                return NULL;
        }

        /* Initialize virtual ptrs to dma_buf region. */
        memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

        /* Allocate iotag for psb->cur_iocbq. */
        iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
        if (iotag == 0) {
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                              psb->data, psb->dma_handle);
                kfree(psb);
                return NULL;
        }
        psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

        psb->fcp_cmnd = psb->data;
        psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
        psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                       sizeof(struct fcp_rsp);

        /* Initialize local short-hand pointers. */
        bpl = psb->fcp_bpl;
        pdma_phys = psb->dma_handle;

        /*
         * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
         * list bdes.  Initialize the first two and leave the rest for
         * queuecommand.
         */
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys += sizeof (struct fcp_cmnd);
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        /*
         * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
         * initialize it with all known data now.
         */
        pdma_phys += (sizeof (struct fcp_rsp));
        iocb = &psb->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
        iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
        iocb->ulpBdeCount = 1;
        iocb->ulpClass = CLASS3;

        return psb;
}
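/*
 * Pop a pre-allocated scsi buffer off the HBA's free list; returns NULL if
 * the pool is currently empty.
 */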
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        return lpfc_cmd;
}
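/*
 * Undo any DMA mappings held by the command and return the scsi buffer to
 * the HBA's free list.
 */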
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
        unsigned long iflag = 0;

        /*
         * There are only two special cases to consider.  (1) the scsi command
         * requested scatter-gather usage or (2) the scsi command allocated
         * a request buffer, but did not request use_sg.  There is a third
         * case, but it does not require resource deallocation.
         */
        if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
                dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
                             psb->seg_cnt, psb->pCmd->sc_data_direction);
        } else {
                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
                        dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
                                         psb->pCmd->request_bufflen,
                                         psb->pCmd->sc_data_direction);
                }
        }

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        psb->pCmd = NULL;
        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
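/*
 * Map the midlayer's data buffer (scatter-gather list or single buffer) and
 * build the corresponding BDEs in the BPL that follows the FCP CMND/RSP.
 */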
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr = 0;
        uint32_t i, num_bde = 0;
        int datadir = scsi_cmnd->sc_data_direction;
        int dma_error;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_cmnd->use_sg) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */
                sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
                lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
                                               scsi_cmnd->use_sg, datadir);
                if (lpfc_cmd->seg_cnt == 0)
                        return 1;

                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg.  Config %d, seg_cnt %d",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
                        dma_unmap_sg(&phba->pcidev->dev, sgel,
                                     lpfc_cmd->seg_cnt, datadir);
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment
                 * count during probe that limits the number of sg elements in
                 * any single scsi command.  Just run through the seg_cnt and
                 * format the bde's.
                 */
                for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                        bpl->tus.f.bdeSize = sg_dma_len(sgel);
                        if (datadir == DMA_TO_DEVICE)
                                bpl->tus.f.bdeFlags = 0;
                        else
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
                        sgel++;
                        num_bde++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                physaddr = dma_map_single(&phba->pcidev->dev,
                                          scsi_cmnd->request_buffer,
                                          scsi_cmnd->request_bufflen,
                                          datadir);
                dma_error = dma_mapping_error(physaddr);
                if (dma_error) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0718 Unable to dma_map_single "
                                        "request_buffer: x%x\n",
                                        phba->brd_no, dma_error);
                        return 1;
                }

                lpfc_cmd->nonsg_phys = physaddr;
                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
                if (datadir == DMA_TO_DEVICE)
                        bpl->tus.f.bdeFlags = 0;
                else
                        bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                num_bde = 1;
                bpl++;
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
         * reinitialized since all iocb memory resources are used many times
         * for transmit, receive, and continuation bpl's.
         */
        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb_cmd->un.fcpi64.bdl.bdeSize +=
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
        return 0;
}
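/*
 * Decode the FCP RSP payload: copy sense data, handle residual under/overrun,
 * and fold the result into the midlayer's host/scsi status.
 */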
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
        uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;

        /*
         * If this is a task management command, there is no
         * scsi packet associated with this lpfc_cmd.  The driver
         * consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0730 FCP command failed: RSP "
                        "Data: x%x x%x x%x x%x x%x x%x\n",
                        phba->brd_no, resp_info, scsi_status,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
                        be32_to_cpu(fcprsp->rspRspLen),
                        fcprsp->rspInfo3);

        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
                    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }

        cmnd->resid = 0;
        if (resp_info & RESID_UNDER) {
                cmnd->resid = be32_to_cpu(fcprsp->rspResId);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0716 FCP Read Underrun, expected %d, "
                                "residual %d Data: x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
                                fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transferred for this command.  Provided a sense condition
                 * is not present, make sure the actual amount transferred is
                 * at least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                        "%d:0717 FCP command x%x residual "
                                        "underrun converted to error "
                                        "Data: x%x x%x x%x\n", phba->brd_no,
                                        cmnd->cmnd[0], cmnd->request_bufflen,
                                        cmnd->resid, cmnd->underflow);

                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0720 FCP command x%x residual "
                                "overrun error. Data: x%x x%x \n",
                                phba->brd_no, cmnd->cmnd[0],
                                cmnd->request_bufflen, cmnd->resid);
                host_status = DID_ERROR;

        /*
         * Check SLI validation that all the transfer was actually done
         * (fcpi_parm should be zero).  Apply check only to reads.
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0734 FCP Read Check Error Data: "
                                "x%x x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl),
                                be32_to_cpu(fcprsp->rspResId),
                                fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
                cmnd->resid = cmnd->request_bufflen;
        }

 out:
        cmnd->result = ScsiResult(host_status, scsi_status);
}
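/*
 * IOCB completion handler for FCP commands: translate the SLI status into a
 * midlayer result, complete the command, and adjust LUN queue depths
 * (ramp-up on success, back-off on queue full).
 */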
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        int result;
        struct scsi_device *sdev, *tmp_sdev;
        int depth = 0;

        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

        if (lpfc_cmd->status) {
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;

                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0729 FCP cmd x%x failed <%d/%d> status: "
                                "x%x result: x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->cmnd[0], cmd->device->id,
                                cmd->device->lun, lpfc_cmd->status,
                                lpfc_cmd->result, pIocbOut->iocb.ulpContext,
                                lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(lpfc_cmd);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
                }

                if ((pnode == NULL)
                    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
                        cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
        } else {
                cmd->result = ScsiResult(DID_OK, 0);
        }

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
                                "SNS x%x x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->device->id,
                                cmd->device->lun, cmd, cmd->result,
                                *lp, *(lp + 3), cmd->retries, cmd->resid);
        }

        result = cmd->result;
        sdev = cmd->device;
        cmd->scsi_done(cmd);

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return;
        }

        if (!result && pnode != NULL &&
            ((jiffies - pnode->last_ramp_up_time) >
             LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
            ((jiffies - pnode->last_q_full_time) >
             LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
            (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
                                if (tmp_sdev->id != sdev->id)
                                        continue;
                                if (tmp_sdev->ordered_tags)
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_ORDERED_TAG,
                                                tmp_sdev->queue_depth+1);
                                else
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_SIMPLE_TAG,
                                                tmp_sdev->queue_depth+1);

                                pnode->last_ramp_up_time = jiffies;
                        }
                }
        }

        /*
         * Check for queue full.  If the lun is reporting queue full, then
         * back off the lun queue depth to prevent target overloads.
         */
        if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
                pnode->last_q_full_time = jiffies;

                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (tmp_sdev->id != sdev->id)
                                continue;
                        depth = scsi_track_queue_full(tmp_sdev,
                                                      tmp_sdev->queue_depth - 1);
                }
                /*
                 * The queue depth cannot be lowered any more.
                 * Modify the returned error code to store
                 * the final depth value set by
                 * scsi_track_queue_full.
                 */
                if (depth == -1)
                        depth = sdev->host->cmd_per_lun;

                if (depth) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0711 detected queue full - lun queue depth "
                                " adjusted to %d.\n", phba->brd_no, depth);
                }
        }

        lpfc_release_scsi_buf(phba, lpfc_cmd);
}
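/*
 * Build the FCP CMND and IOCB fields (LUN, CDB, task attributes, read/write
 * direction) for a command whose BPL has already been prepared.
 */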
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
                    struct lpfc_nodelist *pnode)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
        /* clear task management bits */
        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                       &lpfc_cmd->fcp_cmnd->fcp_lun);

        memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

        if (scsi_cmnd->device->tagged_supported) {
                switch (scsi_cmnd->tag) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = ORDERED_Q;
                        break;
                default:
                        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
                        break;
                }
        } else
                fcp_cmnd->fcpCntl1 = 0;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        if (scsi_cmnd->use_sg) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else {
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }

        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1 = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
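/*
 * Build an FCP task management IOCB (e.g. LUN or target reset) for the node
 * referenced by lpfc_cmd->rdata; bails out if the node is not mapped.
 */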
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             uint8_t task_mgmt_cmd)
{
        struct lpfc_sli *psli;
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;

        if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
                return 0;
        }

        psli = &phba->sli;
        piocbq = &(lpfc_cmd->cur_iocbq);
        piocb = &piocbq->iocb;

        fcp_cmnd = lpfc_cmd->fcp_cmnd;
        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                       &lpfc_cmd->fcp_cmnd->fcp_lun);
        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

        piocb->ulpCommand = CMD_FCP_ICMND64_CR;

        piocb->ulpContext = ndlp->nlp_rpi;
        if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
                piocb->ulpFCP2Rcvy = 1;
        }
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

        /* ulpTimeout is only one byte */
        if (lpfc_cmd->timeout > 0xff) {
                /*
                 * Do not timeout the command at the firmware level.
                 * The driver will provide the timeout mechanism.
                 */
                piocb->ulpTimeout = 0;
        } else {
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }

        return 1;
}
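/* Issue an FCP target reset to tgt_id and wait for the response IOCB. */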
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
                    unsigned tgt_id, struct lpfc_rport_data *rdata)
{
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
        int ret;

        lpfc_cmd->rdata = rdata;
        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
        if (!ret)
                return FAILED;

        lpfc_cmd->scsi_hba = phba;
        iocbq = &lpfc_cmd->cur_iocbq;
        iocbqrsp = lpfc_sli_get_iocbq(phba);

        if (!iocbqrsp)
                return FAILED;

        /* Issue Target Reset to TGT <num> */
        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                        "%d:0702 Issue Target Reset to TGT %d "
                        "Data: x%x x%x\n",
                        phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
                        rdata->pnode->nlp_flag);

        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                ret = FAILED;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
                lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        }

        lpfc_sli_release_iocbq(phba, iocbqrsp);
        return ret;
}
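/* .info entry point: build the model/PCI description string returned to the midlayer. */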
const char *
lpfc_info(struct Scsi_Host *host)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
        int len;
        static char lpfcinfobuf[384];

        memset(lpfcinfobuf,0,384);
        if (phba && phba->pcidev){
                strncpy(lpfcinfobuf, phba->ModelDesc, 256);
                len = strlen(lpfcinfobuf);
                snprintf(lpfcinfobuf + len,
                         384-len,
                         " on PCI bus %02x device %02x irq %d",
                         phba->pcidev->bus->number,
                         phba->pcidev->devfn,
                         phba->pcidev->irq);
                len = strlen(lpfcinfobuf);
                if (phba->Port[0]) {
                        snprintf(lpfcinfobuf + len,
                                 384-len,
                                 " port %s",
                                 phba->Port);
                }
        }
        return lpfcinfobuf;
}
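/*
 * FCP ring polling support: when ring interrupts are disabled, the timer
 * below drives lpfc_sli_poll_fcp_ring() to reap completions.
 */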
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
        unsigned long poll_tmo_expires =
                (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

        if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
                mod_timer(&phba->fcp_poll_timer,
                          poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
        lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
        unsigned long iflag;

        spin_lock_irqsave(phba->host->host_lock, iflag);

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring (phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
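/*
 * queuecommand entry point: map the command, build its IOCB, and post it to
 * the FCP ring; returns SCSI_MLQUEUE_HOST_BUSY when resources are exhausted.
 */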
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
        struct lpfc_hba *phba =
                (struct lpfc_hba *) cmnd->device->host->hostdata;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        int err;

        err = fc_remote_port_chkready(rport);
        if (err) {
                cmnd->result = err;
                goto out_fail_command;
        }

        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        if (!ndlp) {
                cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
                goto out_fail_command;
        }
        lpfc_cmd = lpfc_get_scsi_buf (phba);
        if (lpfc_cmd == NULL) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0707 driver's buffer pool is empty, "
                                "IO busied\n", phba->brd_no);
                goto out_host_busy;
        }

        /*
         * Store the midlayer's command structure for the completion phase
         * and complete the command initialization.
         */
        lpfc_cmd->pCmd  = cmnd;
        lpfc_cmd->rdata = rdata;
        lpfc_cmd->timeout = 0;
        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
        cmnd->scsi_done = done;

        err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        if (err)
                goto out_host_busy_free_buf;

        lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

        err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err)
                goto out_host_busy_free_buf;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;

 out_host_busy_free_buf:
        lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
        return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
        done(cmnd);
        return 0;
}
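/*
 * eh_abort_handler: issue an ABTS for the IOCB still owned by this command
 * (or a CLOSE_XRI when the link is not up), then wait for it to complete.
 */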
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
        IOCB_t *cmd, *icmd;
        int ret = SUCCESS;
        unsigned int loop_count = 0;

        lpfc_block_requests(phba);
        spin_lock_irq(shost->host_lock);

        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;

        /*
         * If pCmd field of the corresponding lpfc_scsi_buf structure
         * points to a different SCSI command, then the driver has
         * already completed this command, but the midlayer did not
         * see the completion before the eh fired.  Just return SUCCESS.
         */
        iocb = &lpfc_cmd->cur_iocbq;
        if (lpfc_cmd->pCmd != cmnd)
                goto out;

        BUG_ON(iocb->context1 != lpfc_cmd);

        abtsiocb = lpfc_sli_get_iocbq(phba);
        if (abtsiocb == NULL) {
                ret = FAILED;
                goto out;
        }

        /*
         * The scsi command can not be in txq and it is in flight because the
         * pCmd is still pointing at the SCSI command we have to abort. There
         * is no need to search the txcmplq. Just send an abort to the FW.
         */

        cmd = &iocb->iocb;
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
        icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
        if (phba->hba_state >= LPFC_LINK_UP)
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
        else
                icmd->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                lpfc_sli_poll_fcp_ring (phba);

        /* Wait for abort to complete */
        while (lpfc_cmd->pCmd == cmnd) {
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_sli_poll_fcp_ring (phba);

                spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);
                if (++loop_count
                    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
                        break;
        }

        if (lpfc_cmd->pCmd == cmnd) {
                ret = FAILED;
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                "%d:0748 abort handler timed out waiting for "
                                "abort to complete: ret %#x, ID %d, LUN %d, "
                                "snum %#lx\n",
                                phba->brd_no, ret, cmnd->device->id,
                                cmnd->device->lun, cmnd->serial_number);
        }

 out:
        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0749 SCSI Layer I/O Abort Request "
                        "Status x%x ID %d LUN %d snum %#lx\n",
                        phba->brd_no, ret, cmnd->device->id,
                        cmnd->device->lun, cmnd->serial_number);

        spin_unlock_irq(shost->host_lock);
        lpfc_unblock_requests(phba);

        return ret;
}
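/*
 * eh_device_reset_handler: send an FCP LUN reset to the target and then flush
 * any I/O still outstanding on that LUN.
 */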
static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        uint32_t cmd_result = 0, cmd_status = 0;
        int ret = FAILED;
        int cnt, loopcnt;

        lpfc_block_requests(phba);
        spin_lock_irq(shost->host_lock);
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or nodev timeout expires.
         */
        while (1) {
                if (!pnode)
                        break;

                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        spin_unlock_irq(phba->host->host_lock);
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        spin_lock_irq(phba->host->host_lock);
                }
                if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
                        break;
        }

        lpfc_cmd = lpfc_get_scsi_buf (phba);
        if (lpfc_cmd == NULL)
                goto out;

        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->timeout = 60;
        lpfc_cmd->scsi_hba = phba;
        lpfc_cmd->rdata = rdata;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
        if (!ret)
                goto out_free_scsi_buf;

        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        iocbqrsp = lpfc_sli_get_iocbq(phba);
        if (iocbqrsp == NULL)
                goto out_free_scsi_buf;

        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                        "%d:0703 Issue LUN Reset to TGT %d LUN %d "
                        "Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
                        cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret == IOCB_SUCCESS)
                ret = SUCCESS;

        cmd_result = iocbqrsp->iocb.un.ulpWord[4];
        cmd_status = iocbqrsp->iocb.ulpStatus;

        lpfc_sli_release_iocbq(phba, iocbqrsp);
        lpfc_release_scsi_buf(phba, lpfc_cmd);

        /*
         * All outstanding txcmplq I/Os should have been aborted by the device.
         * Unfortunately, some targets do not abide by this forcing the driver
         * to double check.
         */
        cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                cmnd->device->id, cmnd->device->lun,
                                LPFC_CTX_LUN);
        if (cnt)
                lpfc_sli_abort_iocb(phba,
                                    &phba->sli.ring[phba->sli.fcp_ring],
                                    cmnd->device->id, cmnd->device->lun,
                                    0, LPFC_CTX_LUN);
        loopcnt = 0;
        while (cnt) {
                spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
                        break;

                cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        cmnd->device->id, cmnd->device->lun,
                                        LPFC_CTX_LUN);
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
                        phba->brd_no, cnt);
                ret = FAILED;
        }

 out_free_scsi_buf:
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0713 SCSI layer issued LUN reset (%d, %d) "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, cmnd->device->id, cmnd->device->lun,
                        ret, cmd_status, cmd_result);

 out:
        spin_unlock_irq(shost->host_lock);
        lpfc_unblock_requests(phba);
        return ret;
}
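/*
 * eh_bus_reset_handler: reset every mapped target on this HBA and then flush
 * all outstanding FCP I/O.
 */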
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
        int cnt, loopcnt;
        struct lpfc_scsi_buf * lpfc_cmd;

        lpfc_block_requests(phba);
        spin_lock_irq(shost->host_lock);

        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;

        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->scsi_hba = phba;

        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        for (i = 0; i < MAX_FCP_TARGET; i++) {
                /* Search the mapped list for this target ID */
                match = 0;
                list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
                        if ((i == ndlp->nlp_sid) && ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                if (!match)
                        continue;

                ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
                                          i, ndlp->rport->dd_data);
                if (ret != SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                "%d:0713 Bus Reset on target %d failed\n",
                                phba->brd_no, i);
                        err_count++;
                }
        }

        if (err_count == 0)
                ret = SUCCESS;

        lpfc_release_scsi_buf(phba, lpfc_cmd);

        /*
         * All outstanding txcmplq I/Os should have been aborted by
         * the targets.  Unfortunately, some targets do not abide by
         * this forcing the driver to double check.
         */
        cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                0, 0, LPFC_CTX_HOST);
        if (cnt)
                lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                    0, 0, 0, LPFC_CTX_HOST);
        loopcnt = 0;
        while (cnt) {
                spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
                        break;

                cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        0, 0, LPFC_CTX_HOST);
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
                   phba->brd_no, cnt, i);
                ret = FAILED;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
                        phba->brd_no, ret);
 out:
        spin_unlock_irq(shost->host_lock);
        lpfc_unblock_requests(phba);
        return ret;
}
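/*
 * slave_alloc: bind the scsi_device to its remote port and grow the global
 * scsi buffer pool, bounded by cfg_hba_queue_depth.
 */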
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;

        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        sdev->hostdata = rport->dd_data;

        /*
         * Populate the cmds_per_lun count scsi_bufs into this host's globally
         * available list of scsi buffers.  Don't allocate more than the
         * HBA limit conveyed to the midlayer via the host structure.  The
         * formula accounts for the lun_queue_depth + error handlers + 1
         * extra.  This list of scsi bufs exists for the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = phba->cfg_lun_queue_depth + 2;
        if (total >= phba->cfg_hba_queue_depth) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0704 At limitation of %d preallocated "
                                "command buffers\n", phba->brd_no, total);
                return 0;
        } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0705 Allocation request of %d command "
                                "buffers will exceed max of %d.  Reducing "
                                "allocation request to %d.\n", phba->brd_no,
                                num_to_alloc, phba->cfg_hba_queue_depth,
                                (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_new_scsi_buf(phba);
                if (!scsi_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0706 Failed to allocate command "
                                        "buffer\n", phba->brd_no);
                        break;
                }

                spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
        }
        return 0;
}
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;
}
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}
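/* Entry points exported to the SCSI midlayer. */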
struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_reset_lun_handler,
        .eh_bus_reset_handler   = lpfc_reset_bus_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_host_attrs,
        .max_sectors            = 0xFFFF,
};