1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 #include <asm/unaligned.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_eh.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_tcq.h>
32 #include <scsi/scsi_transport_fc.h>
34 #include "lpfc_version.h"
38 #include "lpfc_sli4.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_vport.h"
47 #define LPFC_RESET_WAIT 2
48 #define LPFC_ABORT_WAIT 2
52 static char *dif_op_str
[] = {
54 "SCSI_PROT_READ_INSERT",
55 "SCSI_PROT_WRITE_STRIP",
56 "SCSI_PROT_READ_STRIP",
57 "SCSI_PROT_WRITE_INSERT",
58 "SCSI_PROT_READ_PASS",
59 "SCSI_PROT_WRITE_PASS",
62 lpfc_release_scsi_buf_s4(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*psb
);
64 lpfc_release_scsi_buf_s3(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*psb
);
67 lpfc_debug_save_data(struct lpfc_hba
*phba
, struct scsi_cmnd
*cmnd
)
70 struct scatterlist
*sgde
= scsi_sglist(cmnd
);
72 if (!_dump_buf_data
) {
73 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
74 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
81 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
82 "9051 BLKGRD: ERROR: data scatterlist is null\n");
86 dst
= (void *) _dump_buf_data
;
89 memcpy(dst
, src
, sgde
->length
);
96 lpfc_debug_save_dif(struct lpfc_hba
*phba
, struct scsi_cmnd
*cmnd
)
99 struct scatterlist
*sgde
= scsi_prot_sglist(cmnd
);
101 if (!_dump_buf_dif
) {
102 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
103 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
109 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
110 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
117 memcpy(dst
, src
, sgde
->length
);
119 sgde
= sg_next(sgde
);
124 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
125 * @phba: Pointer to HBA object.
126 * @lpfc_cmd: lpfc scsi command object pointer.
128 * This function is called from the lpfc_prep_task_mgmt_cmd function to
129 * set the last bit in the response sge entry.
132 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba
*phba
,
133 struct lpfc_scsi_buf
*lpfc_cmd
)
135 struct sli4_sge
*sgl
= (struct sli4_sge
*)lpfc_cmd
->fcp_bpl
;
138 sgl
->word2
= le32_to_cpu(sgl
->word2
);
139 bf_set(lpfc_sli4_sge_last
, sgl
, 1);
140 sgl
->word2
= cpu_to_le32(sgl
->word2
);
145 * lpfc_update_stats - Update statistical data for the command completion
146 * @phba: Pointer to HBA object.
147 * @lpfc_cmd: lpfc scsi command object pointer.
149 * This function is called when there is a command completion and this
150 * function updates the statistical data for the command completion.
153 lpfc_update_stats(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*lpfc_cmd
)
155 struct lpfc_rport_data
*rdata
= lpfc_cmd
->rdata
;
156 struct lpfc_nodelist
*pnode
= rdata
->pnode
;
157 struct scsi_cmnd
*cmd
= lpfc_cmd
->pCmd
;
159 struct Scsi_Host
*shost
= cmd
->device
->host
;
160 struct lpfc_vport
*vport
= (struct lpfc_vport
*) shost
->hostdata
;
161 unsigned long latency
;
167 latency
= jiffies_to_msecs((long)jiffies
- (long)lpfc_cmd
->start_time
);
169 spin_lock_irqsave(shost
->host_lock
, flags
);
170 if (!vport
->stat_data_enabled
||
171 vport
->stat_data_blocked
||
174 (phba
->bucket_type
== LPFC_NO_BUCKET
)) {
175 spin_unlock_irqrestore(shost
->host_lock
, flags
);
179 if (phba
->bucket_type
== LPFC_LINEAR_BUCKET
) {
180 i
= (latency
+ phba
->bucket_step
- 1 - phba
->bucket_base
)/
182 /* check array subscript bounds */
185 else if (i
>= LPFC_MAX_BUCKET_COUNT
)
186 i
= LPFC_MAX_BUCKET_COUNT
- 1;
188 for (i
= 0; i
< LPFC_MAX_BUCKET_COUNT
-1; i
++)
189 if (latency
<= (phba
->bucket_base
+
190 ((1<<i
)*phba
->bucket_step
)))
194 pnode
->lat_data
[i
].cmd_count
++;
195 spin_unlock_irqrestore(shost
->host_lock
, flags
);
199 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
200 * @phba: Pointer to HBA context object.
201 * @vport: Pointer to vport object.
202 * @ndlp: Pointer to FC node associated with the target.
203 * @lun: Lun number of the scsi device.
204 * @old_val: Old value of the queue depth.
205 * @new_val: New value of the queue depth.
207 * This function sends an event to the mgmt application indicating
208 * there is a change in the scsi device queue depth.
211 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba
*phba
,
212 struct lpfc_vport
*vport
,
213 struct lpfc_nodelist
*ndlp
,
218 struct lpfc_fast_path_event
*fast_path_evt
;
221 fast_path_evt
= lpfc_alloc_fast_evt(phba
);
225 fast_path_evt
->un
.queue_depth_evt
.scsi_event
.event_type
=
227 fast_path_evt
->un
.queue_depth_evt
.scsi_event
.subcategory
=
228 LPFC_EVENT_VARQUEDEPTH
;
230 /* Report all luns with change in queue depth */
231 fast_path_evt
->un
.queue_depth_evt
.scsi_event
.lun
= lun
;
232 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
)) {
233 memcpy(&fast_path_evt
->un
.queue_depth_evt
.scsi_event
.wwpn
,
234 &ndlp
->nlp_portname
, sizeof(struct lpfc_name
));
235 memcpy(&fast_path_evt
->un
.queue_depth_evt
.scsi_event
.wwnn
,
236 &ndlp
->nlp_nodename
, sizeof(struct lpfc_name
));
239 fast_path_evt
->un
.queue_depth_evt
.oldval
= old_val
;
240 fast_path_evt
->un
.queue_depth_evt
.newval
= new_val
;
241 fast_path_evt
->vport
= vport
;
243 fast_path_evt
->work_evt
.evt
= LPFC_EVT_FASTPATH_MGMT_EVT
;
244 spin_lock_irqsave(&phba
->hbalock
, flags
);
245 list_add_tail(&fast_path_evt
->work_evt
.evt_listp
, &phba
->work_list
);
246 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
247 lpfc_worker_wake_up(phba
);
253 * lpfc_change_queue_depth - Alter scsi device queue depth
254 * @sdev: Pointer the scsi device on which to change the queue depth.
255 * @qdepth: New queue depth to set the sdev to.
256 * @reason: The reason for the queue depth change.
258 * This function is called by the midlayer and the LLD to alter the queue
259 * depth for a scsi device. This function sets the queue depth to the new
260 * value and sends an event out to log the queue depth change.
263 lpfc_change_queue_depth(struct scsi_device
*sdev
, int qdepth
, int reason
)
265 struct lpfc_vport
*vport
= (struct lpfc_vport
*) sdev
->host
->hostdata
;
266 struct lpfc_hba
*phba
= vport
->phba
;
267 struct lpfc_rport_data
*rdata
;
268 unsigned long new_queue_depth
, old_queue_depth
;
270 old_queue_depth
= sdev
->queue_depth
;
271 scsi_adjust_queue_depth(sdev
, scsi_get_tag_type(sdev
), qdepth
);
272 new_queue_depth
= sdev
->queue_depth
;
273 rdata
= sdev
->hostdata
;
275 lpfc_send_sdev_queuedepth_change_event(phba
, vport
,
276 rdata
->pnode
, sdev
->lun
,
279 return sdev
->queue_depth
;
283 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
284 * @phba: The Hba for which this call is being executed.
286 * This routine is called when there is resource error in driver or firmware.
287 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
288 * posts at most 1 event each second. This routine wakes up worker thread of
289 * @phba to process WORKER_RAM_DOWN_EVENT event.
291 * This routine should be called with no lock held.
294 lpfc_rampdown_queue_depth(struct lpfc_hba
*phba
)
299 spin_lock_irqsave(&phba
->hbalock
, flags
);
300 atomic_inc(&phba
->num_rsrc_err
);
301 phba
->last_rsrc_error_time
= jiffies
;
303 if ((phba
->last_ramp_down_time
+ QUEUE_RAMP_DOWN_INTERVAL
) > jiffies
) {
304 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
308 phba
->last_ramp_down_time
= jiffies
;
310 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
312 spin_lock_irqsave(&phba
->pport
->work_port_lock
, flags
);
313 evt_posted
= phba
->pport
->work_port_events
& WORKER_RAMP_DOWN_QUEUE
;
315 phba
->pport
->work_port_events
|= WORKER_RAMP_DOWN_QUEUE
;
316 spin_unlock_irqrestore(&phba
->pport
->work_port_lock
, flags
);
319 lpfc_worker_wake_up(phba
);
324 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
325 * @phba: The Hba for which this call is being executed.
327 * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
328 * post at most 1 event every 5 minute after last_ramp_up_time or
329 * last_rsrc_error_time. This routine wakes up worker thread of @phba
330 * to process WORKER_RAM_DOWN_EVENT event.
332 * This routine should be called with no lock held.
335 lpfc_rampup_queue_depth(struct lpfc_vport
*vport
,
336 uint32_t queue_depth
)
339 struct lpfc_hba
*phba
= vport
->phba
;
341 atomic_inc(&phba
->num_cmd_success
);
343 if (vport
->cfg_lun_queue_depth
<= queue_depth
)
345 spin_lock_irqsave(&phba
->hbalock
, flags
);
346 if (time_before(jiffies
,
347 phba
->last_ramp_up_time
+ QUEUE_RAMP_UP_INTERVAL
) ||
349 phba
->last_rsrc_error_time
+ QUEUE_RAMP_UP_INTERVAL
)) {
350 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
353 phba
->last_ramp_up_time
= jiffies
;
354 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
356 spin_lock_irqsave(&phba
->pport
->work_port_lock
, flags
);
357 evt_posted
= phba
->pport
->work_port_events
& WORKER_RAMP_UP_QUEUE
;
359 phba
->pport
->work_port_events
|= WORKER_RAMP_UP_QUEUE
;
360 spin_unlock_irqrestore(&phba
->pport
->work_port_lock
, flags
);
363 lpfc_worker_wake_up(phba
);
368 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
369 * @phba: The Hba for which this call is being executed.
371 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
372 * thread.This routine reduces queue depth for all scsi device on each vport
373 * associated with @phba.
376 lpfc_ramp_down_queue_handler(struct lpfc_hba
*phba
)
378 struct lpfc_vport
**vports
;
379 struct Scsi_Host
*shost
;
380 struct scsi_device
*sdev
;
381 unsigned long new_queue_depth
;
382 unsigned long num_rsrc_err
, num_cmd_success
;
385 num_rsrc_err
= atomic_read(&phba
->num_rsrc_err
);
386 num_cmd_success
= atomic_read(&phba
->num_cmd_success
);
388 vports
= lpfc_create_vport_work_array(phba
);
390 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
391 shost
= lpfc_shost_from_vport(vports
[i
]);
392 shost_for_each_device(sdev
, shost
) {
394 sdev
->queue_depth
* num_rsrc_err
/
395 (num_rsrc_err
+ num_cmd_success
);
396 if (!new_queue_depth
)
397 new_queue_depth
= sdev
->queue_depth
- 1;
399 new_queue_depth
= sdev
->queue_depth
-
401 lpfc_change_queue_depth(sdev
, new_queue_depth
,
402 SCSI_QDEPTH_DEFAULT
);
405 lpfc_destroy_vport_work_array(phba
, vports
);
406 atomic_set(&phba
->num_rsrc_err
, 0);
407 atomic_set(&phba
->num_cmd_success
, 0);
411 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
412 * @phba: The Hba for which this call is being executed.
414 * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
415 * thread.This routine increases queue depth for all scsi device on each vport
416 * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
417 * num_cmd_success to zero.
420 lpfc_ramp_up_queue_handler(struct lpfc_hba
*phba
)
422 struct lpfc_vport
**vports
;
423 struct Scsi_Host
*shost
;
424 struct scsi_device
*sdev
;
427 vports
= lpfc_create_vport_work_array(phba
);
429 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
430 shost
= lpfc_shost_from_vport(vports
[i
]);
431 shost_for_each_device(sdev
, shost
) {
432 if (vports
[i
]->cfg_lun_queue_depth
<=
435 lpfc_change_queue_depth(sdev
,
437 SCSI_QDEPTH_RAMP_UP
);
440 lpfc_destroy_vport_work_array(phba
, vports
);
441 atomic_set(&phba
->num_rsrc_err
, 0);
442 atomic_set(&phba
->num_cmd_success
, 0);
446 * lpfc_scsi_dev_block - set all scsi hosts to block state
447 * @phba: Pointer to HBA context object.
449 * This function walks vport list and set each SCSI host to block state
450 * by invoking fc_remote_port_delete() routine. This function is invoked
451 * with EEH when device's PCI slot has been permanently disabled.
454 lpfc_scsi_dev_block(struct lpfc_hba
*phba
)
456 struct lpfc_vport
**vports
;
457 struct Scsi_Host
*shost
;
458 struct scsi_device
*sdev
;
459 struct fc_rport
*rport
;
462 vports
= lpfc_create_vport_work_array(phba
);
464 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
465 shost
= lpfc_shost_from_vport(vports
[i
]);
466 shost_for_each_device(sdev
, shost
) {
467 rport
= starget_to_rport(scsi_target(sdev
));
468 fc_remote_port_delete(rport
);
471 lpfc_destroy_vport_work_array(phba
, vports
);
475 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
476 * @vport: The virtual port for which this call being executed.
477 * @num_to_allocate: The requested number of buffers to allocate.
479 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
480 * the scsi buffer contains all the necessary information needed to initiate
481 * a SCSI I/O. The non-DMAable buffer region contains information to build
482 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
483 * and the initial BPL. In addition to allocating memory, the FCP CMND and
484 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
487 * int - number of scsi buffers that were allocated.
488 * 0 = failure, less than num_to_alloc is a partial failure.
491 lpfc_new_scsi_buf_s3(struct lpfc_vport
*vport
, int num_to_alloc
)
493 struct lpfc_hba
*phba
= vport
->phba
;
494 struct lpfc_scsi_buf
*psb
;
495 struct ulp_bde64
*bpl
;
497 dma_addr_t pdma_phys_fcp_cmd
;
498 dma_addr_t pdma_phys_fcp_rsp
;
499 dma_addr_t pdma_phys_bpl
;
503 for (bcnt
= 0; bcnt
< num_to_alloc
; bcnt
++) {
504 psb
= kzalloc(sizeof(struct lpfc_scsi_buf
), GFP_KERNEL
);
509 * Get memory from the pci pool to map the virt space to pci
510 * bus space for an I/O. The DMA buffer includes space for the
511 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
512 * necessary to support the sg_tablesize.
514 psb
->data
= pci_pool_alloc(phba
->lpfc_scsi_dma_buf_pool
,
515 GFP_KERNEL
, &psb
->dma_handle
);
521 /* Initialize virtual ptrs to dma_buf region. */
522 memset(psb
->data
, 0, phba
->cfg_sg_dma_buf_size
);
524 /* Allocate iotag for psb->cur_iocbq. */
525 iotag
= lpfc_sli_next_iotag(phba
, &psb
->cur_iocbq
);
527 pci_pool_free(phba
->lpfc_scsi_dma_buf_pool
,
528 psb
->data
, psb
->dma_handle
);
532 psb
->cur_iocbq
.iocb_flag
|= LPFC_IO_FCP
;
534 psb
->fcp_cmnd
= psb
->data
;
535 psb
->fcp_rsp
= psb
->data
+ sizeof(struct fcp_cmnd
);
536 psb
->fcp_bpl
= psb
->data
+ sizeof(struct fcp_cmnd
) +
537 sizeof(struct fcp_rsp
);
539 /* Initialize local short-hand pointers. */
541 pdma_phys_fcp_cmd
= psb
->dma_handle
;
542 pdma_phys_fcp_rsp
= psb
->dma_handle
+ sizeof(struct fcp_cmnd
);
543 pdma_phys_bpl
= psb
->dma_handle
+ sizeof(struct fcp_cmnd
) +
544 sizeof(struct fcp_rsp
);
547 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
548 * are sg list bdes. Initialize the first two and leave the
549 * rest for queuecommand.
551 bpl
[0].addrHigh
= le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd
));
552 bpl
[0].addrLow
= le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd
));
553 bpl
[0].tus
.f
.bdeSize
= sizeof(struct fcp_cmnd
);
554 bpl
[0].tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
555 bpl
[0].tus
.w
= le32_to_cpu(bpl
[0].tus
.w
);
557 /* Setup the physical region for the FCP RSP */
558 bpl
[1].addrHigh
= le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp
));
559 bpl
[1].addrLow
= le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp
));
560 bpl
[1].tus
.f
.bdeSize
= sizeof(struct fcp_rsp
);
561 bpl
[1].tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
562 bpl
[1].tus
.w
= le32_to_cpu(bpl
[1].tus
.w
);
565 * Since the IOCB for the FCP I/O is built into this
566 * lpfc_scsi_buf, initialize it with all known data now.
568 iocb
= &psb
->cur_iocbq
.iocb
;
569 iocb
->un
.fcpi64
.bdl
.ulpIoTag32
= 0;
570 if ((phba
->sli_rev
== 3) &&
571 !(phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
)) {
572 /* fill in immediate fcp command BDE */
573 iocb
->un
.fcpi64
.bdl
.bdeFlags
= BUFF_TYPE_BDE_IMMED
;
574 iocb
->un
.fcpi64
.bdl
.bdeSize
= sizeof(struct fcp_cmnd
);
575 iocb
->un
.fcpi64
.bdl
.addrLow
= offsetof(IOCB_t
,
577 iocb
->un
.fcpi64
.bdl
.addrHigh
= 0;
578 iocb
->ulpBdeCount
= 0;
580 /* fill in responce BDE */
581 iocb
->unsli3
.fcp_ext
.rbde
.tus
.f
.bdeFlags
=
583 iocb
->unsli3
.fcp_ext
.rbde
.tus
.f
.bdeSize
=
584 sizeof(struct fcp_rsp
);
585 iocb
->unsli3
.fcp_ext
.rbde
.addrLow
=
586 putPaddrLow(pdma_phys_fcp_rsp
);
587 iocb
->unsli3
.fcp_ext
.rbde
.addrHigh
=
588 putPaddrHigh(pdma_phys_fcp_rsp
);
590 iocb
->un
.fcpi64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
591 iocb
->un
.fcpi64
.bdl
.bdeSize
=
592 (2 * sizeof(struct ulp_bde64
));
593 iocb
->un
.fcpi64
.bdl
.addrLow
=
594 putPaddrLow(pdma_phys_bpl
);
595 iocb
->un
.fcpi64
.bdl
.addrHigh
=
596 putPaddrHigh(pdma_phys_bpl
);
597 iocb
->ulpBdeCount
= 1;
600 iocb
->ulpClass
= CLASS3
;
601 psb
->status
= IOSTAT_SUCCESS
;
602 /* Put it back into the SCSI buffer list */
603 psb
->cur_iocbq
.context1
= psb
;
604 lpfc_release_scsi_buf_s3(phba
, psb
);
612 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
613 * @phba: pointer to lpfc hba data structure.
614 * @axri: pointer to the fcp xri abort wcqe structure.
616 * This routine is invoked by the worker thread to process a SLI4 fast-path
620 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba
*phba
,
621 struct sli4_wcqe_xri_aborted
*axri
)
623 uint16_t xri
= bf_get(lpfc_wcqe_xa_xri
, axri
);
624 uint16_t rxid
= bf_get(lpfc_wcqe_xa_remote_xid
, axri
);
625 struct lpfc_scsi_buf
*psb
, *next_psb
;
626 unsigned long iflag
= 0;
627 struct lpfc_iocbq
*iocbq
;
629 struct lpfc_nodelist
*ndlp
;
631 struct lpfc_sli_ring
*pring
= &phba
->sli
.ring
[LPFC_ELS_RING
];
633 spin_lock_irqsave(&phba
->hbalock
, iflag
);
634 spin_lock(&phba
->sli4_hba
.abts_scsi_buf_list_lock
);
635 list_for_each_entry_safe(psb
, next_psb
,
636 &phba
->sli4_hba
.lpfc_abts_scsi_buf_list
, list
) {
637 if (psb
->cur_iocbq
.sli4_xritag
== xri
) {
638 list_del(&psb
->list
);
640 psb
->status
= IOSTAT_SUCCESS
;
642 &phba
->sli4_hba
.abts_scsi_buf_list_lock
);
643 ndlp
= psb
->rdata
->pnode
;
644 rrq_empty
= list_empty(&phba
->active_rrq_list
);
645 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
647 lpfc_set_rrq_active(phba
, ndlp
, xri
, rxid
, 1);
648 lpfc_release_scsi_buf_s4(phba
, psb
);
650 lpfc_worker_wake_up(phba
);
654 spin_unlock(&phba
->sli4_hba
.abts_scsi_buf_list_lock
);
655 for (i
= 1; i
<= phba
->sli
.last_iotag
; i
++) {
656 iocbq
= phba
->sli
.iocbq_lookup
[i
];
658 if (!(iocbq
->iocb_flag
& LPFC_IO_FCP
) ||
659 (iocbq
->iocb_flag
& LPFC_IO_LIBDFC
))
661 if (iocbq
->sli4_xritag
!= xri
)
663 psb
= container_of(iocbq
, struct lpfc_scsi_buf
, cur_iocbq
);
665 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
667 lpfc_worker_wake_up(phba
);
671 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
675 * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block
676 * @phba: pointer to lpfc hba data structure.
678 * This routine walks the list of scsi buffers that have been allocated and
679 * repost them to the HBA by using SGL block post. This is needed after a
680 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
681 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
682 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
684 * Returns: 0 = success, non-zero failure.
687 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba
*phba
)
689 struct lpfc_scsi_buf
*psb
;
690 int index
, status
, bcnt
= 0, rcnt
= 0, rc
= 0;
693 for (index
= 0; index
< phba
->sli4_hba
.scsi_xri_cnt
; index
++) {
694 psb
= phba
->sli4_hba
.lpfc_scsi_psb_array
[index
];
696 /* Remove from SCSI buffer list */
697 list_del(&psb
->list
);
698 /* Add it to a local SCSI buffer list */
699 list_add_tail(&psb
->list
, &sblist
);
700 if (++rcnt
== LPFC_NEMBED_MBOX_SGL_CNT
) {
705 /* A hole present in the XRI array, need to skip */
708 if (index
== phba
->sli4_hba
.scsi_xri_cnt
- 1)
709 /* End of XRI array for SCSI buffer, complete */
712 /* Continue until collect up to a nembed page worth of sgls */
715 /* Now, post the SCSI buffer list sgls as a block */
716 status
= lpfc_sli4_post_scsi_sgl_block(phba
, &sblist
, bcnt
);
717 /* Reset SCSI buffer count for next round of posting */
719 while (!list_empty(&sblist
)) {
720 list_remove_head(&sblist
, psb
, struct lpfc_scsi_buf
,
723 /* Put this back on the abort scsi list */
728 psb
->status
= IOSTAT_SUCCESS
;
730 /* Put it back into the SCSI buffer list */
731 lpfc_release_scsi_buf_s4(phba
, psb
);
738 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
739 * @vport: The virtual port for which this call being executed.
740 * @num_to_allocate: The requested number of buffers to allocate.
742 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
743 * the scsi buffer contains all the necessary information needed to initiate
747 * int - number of scsi buffers that were allocated.
748 * 0 = failure, less than num_to_alloc is a partial failure.
751 lpfc_new_scsi_buf_s4(struct lpfc_vport
*vport
, int num_to_alloc
)
753 struct lpfc_hba
*phba
= vport
->phba
;
754 struct lpfc_scsi_buf
*psb
;
755 struct sli4_sge
*sgl
;
757 dma_addr_t pdma_phys_fcp_cmd
;
758 dma_addr_t pdma_phys_fcp_rsp
;
759 dma_addr_t pdma_phys_bpl
, pdma_phys_bpl1
;
760 uint16_t iotag
, last_xritag
= NO_XRI
;
761 int status
= 0, index
;
763 int non_sequential_xri
= 0;
766 for (bcnt
= 0; bcnt
< num_to_alloc
; bcnt
++) {
767 psb
= kzalloc(sizeof(struct lpfc_scsi_buf
), GFP_KERNEL
);
772 * Get memory from the pci pool to map the virt space to pci bus
773 * space for an I/O. The DMA buffer includes space for the
774 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
775 * necessary to support the sg_tablesize.
777 psb
->data
= pci_pool_alloc(phba
->lpfc_scsi_dma_buf_pool
,
778 GFP_KERNEL
, &psb
->dma_handle
);
784 /* Initialize virtual ptrs to dma_buf region. */
785 memset(psb
->data
, 0, phba
->cfg_sg_dma_buf_size
);
787 /* Allocate iotag for psb->cur_iocbq. */
788 iotag
= lpfc_sli_next_iotag(phba
, &psb
->cur_iocbq
);
790 pci_pool_free(phba
->lpfc_scsi_dma_buf_pool
,
791 psb
->data
, psb
->dma_handle
);
796 psb
->cur_iocbq
.sli4_xritag
= lpfc_sli4_next_xritag(phba
);
797 if (psb
->cur_iocbq
.sli4_xritag
== NO_XRI
) {
798 pci_pool_free(phba
->lpfc_scsi_dma_buf_pool
,
799 psb
->data
, psb
->dma_handle
);
803 if (last_xritag
!= NO_XRI
804 && psb
->cur_iocbq
.sli4_xritag
!= (last_xritag
+1)) {
805 non_sequential_xri
= 1;
807 list_add_tail(&psb
->list
, &sblist
);
808 last_xritag
= psb
->cur_iocbq
.sli4_xritag
;
810 index
= phba
->sli4_hba
.scsi_xri_cnt
++;
811 psb
->cur_iocbq
.iocb_flag
|= LPFC_IO_FCP
;
813 psb
->fcp_bpl
= psb
->data
;
814 psb
->fcp_cmnd
= (psb
->data
+ phba
->cfg_sg_dma_buf_size
)
815 - (sizeof(struct fcp_cmnd
) + sizeof(struct fcp_rsp
));
816 psb
->fcp_rsp
= (struct fcp_rsp
*)((uint8_t *)psb
->fcp_cmnd
+
817 sizeof(struct fcp_cmnd
));
819 /* Initialize local short-hand pointers. */
820 sgl
= (struct sli4_sge
*)psb
->fcp_bpl
;
821 pdma_phys_bpl
= psb
->dma_handle
;
823 (psb
->dma_handle
+ phba
->cfg_sg_dma_buf_size
)
824 - (sizeof(struct fcp_cmnd
) + sizeof(struct fcp_rsp
));
825 pdma_phys_fcp_rsp
= pdma_phys_fcp_cmd
+ sizeof(struct fcp_cmnd
);
828 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
829 * are sg list bdes. Initialize the first two and leave the
830 * rest for queuecommand.
832 sgl
->addr_hi
= cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd
));
833 sgl
->addr_lo
= cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd
));
834 bf_set(lpfc_sli4_sge_last
, sgl
, 0);
835 sgl
->word2
= cpu_to_le32(sgl
->word2
);
836 sgl
->sge_len
= cpu_to_le32(sizeof(struct fcp_cmnd
));
839 /* Setup the physical region for the FCP RSP */
840 sgl
->addr_hi
= cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp
));
841 sgl
->addr_lo
= cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp
));
842 bf_set(lpfc_sli4_sge_last
, sgl
, 1);
843 sgl
->word2
= cpu_to_le32(sgl
->word2
);
844 sgl
->sge_len
= cpu_to_le32(sizeof(struct fcp_rsp
));
847 * Since the IOCB for the FCP I/O is built into this
848 * lpfc_scsi_buf, initialize it with all known data now.
850 iocb
= &psb
->cur_iocbq
.iocb
;
851 iocb
->un
.fcpi64
.bdl
.ulpIoTag32
= 0;
852 iocb
->un
.fcpi64
.bdl
.bdeFlags
= BUFF_TYPE_BDE_64
;
853 /* setting the BLP size to 2 * sizeof BDE may not be correct.
854 * We are setting the bpl to point to out sgl. An sgl's
855 * entries are 16 bytes, a bpl entries are 12 bytes.
857 iocb
->un
.fcpi64
.bdl
.bdeSize
= sizeof(struct fcp_cmnd
);
858 iocb
->un
.fcpi64
.bdl
.addrLow
= putPaddrLow(pdma_phys_fcp_cmd
);
859 iocb
->un
.fcpi64
.bdl
.addrHigh
= putPaddrHigh(pdma_phys_fcp_cmd
);
860 iocb
->ulpBdeCount
= 1;
862 iocb
->ulpClass
= CLASS3
;
863 psb
->cur_iocbq
.context1
= psb
;
864 if (phba
->cfg_sg_dma_buf_size
> SGL_PAGE_SIZE
)
865 pdma_phys_bpl1
= pdma_phys_bpl
+ SGL_PAGE_SIZE
;
868 psb
->dma_phys_bpl
= pdma_phys_bpl
;
869 phba
->sli4_hba
.lpfc_scsi_psb_array
[index
] = psb
;
870 if (non_sequential_xri
) {
871 status
= lpfc_sli4_post_sgl(phba
, pdma_phys_bpl
,
873 psb
->cur_iocbq
.sli4_xritag
);
875 /* Put this back on the abort scsi list */
879 psb
->status
= IOSTAT_SUCCESS
;
881 /* Put it back into the SCSI buffer list */
882 lpfc_release_scsi_buf_s4(phba
, psb
);
887 status
= lpfc_sli4_post_scsi_sgl_block(phba
, &sblist
, bcnt
);
888 /* Reset SCSI buffer count for next round of posting */
889 while (!list_empty(&sblist
)) {
890 list_remove_head(&sblist
, psb
, struct lpfc_scsi_buf
,
893 /* Put this back on the abort scsi list */
897 psb
->status
= IOSTAT_SUCCESS
;
899 /* Put it back into the SCSI buffer list */
900 lpfc_release_scsi_buf_s4(phba
, psb
);
904 return bcnt
+ non_sequential_xri
;
908 * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
909 * @vport: The virtual port for which this call being executed.
910 * @num_to_allocate: The requested number of buffers to allocate.
912 * This routine wraps the actual SCSI buffer allocator function pointer from
913 * the lpfc_hba struct.
916 * int - number of scsi buffers that were allocated.
917 * 0 = failure, less than num_to_alloc is a partial failure.
920 lpfc_new_scsi_buf(struct lpfc_vport
*vport
, int num_to_alloc
)
922 return vport
->phba
->lpfc_new_scsi_buf(vport
, num_to_alloc
);
926 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
927 * @phba: The HBA for which this call is being executed.
929 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
930 * and returns to caller.
934 * Pointer to lpfc_scsi_buf - Success
936 static struct lpfc_scsi_buf
*
937 lpfc_get_scsi_buf_s3(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
939 struct lpfc_scsi_buf
* lpfc_cmd
= NULL
;
940 struct list_head
*scsi_buf_list
= &phba
->lpfc_scsi_buf_list
;
941 unsigned long iflag
= 0;
943 spin_lock_irqsave(&phba
->scsi_buf_list_lock
, iflag
);
944 list_remove_head(scsi_buf_list
, lpfc_cmd
, struct lpfc_scsi_buf
, list
);
946 lpfc_cmd
->seg_cnt
= 0;
947 lpfc_cmd
->nonsg_phys
= 0;
948 lpfc_cmd
->prot_seg_cnt
= 0;
950 spin_unlock_irqrestore(&phba
->scsi_buf_list_lock
, iflag
);
954 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
955 * @phba: The HBA for which this call is being executed.
957 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
958 * and returns to caller.
962 * Pointer to lpfc_scsi_buf - Success
964 static struct lpfc_scsi_buf
*
965 lpfc_get_scsi_buf_s4(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
967 struct lpfc_scsi_buf
*lpfc_cmd
= NULL
;
968 struct lpfc_scsi_buf
*start_lpfc_cmd
= NULL
;
969 struct list_head
*scsi_buf_list
= &phba
->lpfc_scsi_buf_list
;
970 unsigned long iflag
= 0;
973 spin_lock_irqsave(&phba
->scsi_buf_list_lock
, iflag
);
974 list_remove_head(scsi_buf_list
, lpfc_cmd
, struct lpfc_scsi_buf
, list
);
975 spin_unlock_irqrestore(&phba
->scsi_buf_list_lock
, iflag
);
976 while (!found
&& lpfc_cmd
) {
977 if (lpfc_test_rrq_active(phba
, ndlp
,
978 lpfc_cmd
->cur_iocbq
.sli4_xritag
)) {
979 lpfc_release_scsi_buf_s4(phba
, lpfc_cmd
);
980 spin_lock_irqsave(&phba
->scsi_buf_list_lock
, iflag
);
981 list_remove_head(scsi_buf_list
, lpfc_cmd
,
982 struct lpfc_scsi_buf
, list
);
983 spin_unlock_irqrestore(&phba
->scsi_buf_list_lock
,
985 if (lpfc_cmd
== start_lpfc_cmd
) {
992 lpfc_cmd
->seg_cnt
= 0;
993 lpfc_cmd
->nonsg_phys
= 0;
994 lpfc_cmd
->prot_seg_cnt
= 0;
999 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1000 * @phba: The HBA for which this call is being executed.
1002 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1003 * and returns to caller.
1007 * Pointer to lpfc_scsi_buf - Success
1009 static struct lpfc_scsi_buf
*
1010 lpfc_get_scsi_buf(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
1012 return phba
->lpfc_get_scsi_buf(phba
, ndlp
);
1016 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
1017 * @phba: The Hba for which this call is being executed.
1018 * @psb: The scsi buffer which is being released.
1020 * This routine releases @psb scsi buffer by adding it to tail of @phba
1021 * lpfc_scsi_buf_list list.
1024 lpfc_release_scsi_buf_s3(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*psb
)
1026 unsigned long iflag
= 0;
1028 spin_lock_irqsave(&phba
->scsi_buf_list_lock
, iflag
);
1030 list_add_tail(&psb
->list
, &phba
->lpfc_scsi_buf_list
);
1031 spin_unlock_irqrestore(&phba
->scsi_buf_list_lock
, iflag
);
1035 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
1036 * @phba: The Hba for which this call is being executed.
1037 * @psb: The scsi buffer which is being released.
1039 * This routine releases @psb scsi buffer by adding it to tail of @phba
1040 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
1041 * and cannot be reused for at least RA_TOV amount of time if it was
1045 lpfc_release_scsi_buf_s4(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*psb
)
1047 unsigned long iflag
= 0;
1049 if (psb
->exch_busy
) {
1050 spin_lock_irqsave(&phba
->sli4_hba
.abts_scsi_buf_list_lock
,
1053 list_add_tail(&psb
->list
,
1054 &phba
->sli4_hba
.lpfc_abts_scsi_buf_list
);
1055 spin_unlock_irqrestore(&phba
->sli4_hba
.abts_scsi_buf_list_lock
,
1059 spin_lock_irqsave(&phba
->scsi_buf_list_lock
, iflag
);
1061 list_add_tail(&psb
->list
, &phba
->lpfc_scsi_buf_list
);
1062 spin_unlock_irqrestore(&phba
->scsi_buf_list_lock
, iflag
);
1067 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
1068 * @phba: The Hba for which this call is being executed.
1069 * @psb: The scsi buffer which is being released.
1071 * This routine releases @psb scsi buffer by adding it to tail of @phba
1072 * lpfc_scsi_buf_list list.
1075 lpfc_release_scsi_buf(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*psb
)
1078 phba
->lpfc_release_scsi_buf(phba
, psb
);
1082 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
1083 * @phba: The Hba for which this call is being executed.
1084 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1086 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1087 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
1088 * through sg elements and format the bdea. This routine also initializes all
1089 * IOCB fields which are dependent on scsi command request buffer.
1096 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*lpfc_cmd
)
1098 struct scsi_cmnd
*scsi_cmnd
= lpfc_cmd
->pCmd
;
1099 struct scatterlist
*sgel
= NULL
;
1100 struct fcp_cmnd
*fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
1101 struct ulp_bde64
*bpl
= lpfc_cmd
->fcp_bpl
;
1102 struct lpfc_iocbq
*iocbq
= &lpfc_cmd
->cur_iocbq
;
1103 IOCB_t
*iocb_cmd
= &lpfc_cmd
->cur_iocbq
.iocb
;
1104 struct ulp_bde64
*data_bde
= iocb_cmd
->unsli3
.fcp_ext
.dbde
;
1105 dma_addr_t physaddr
;
1106 uint32_t num_bde
= 0;
1107 int nseg
, datadir
= scsi_cmnd
->sc_data_direction
;
1110 * There are three possibilities here - use scatter-gather segment, use
1111 * the single mapping, or neither. Start the lpfc command prep by
1112 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1116 if (scsi_sg_count(scsi_cmnd
)) {
1118 * The driver stores the segment count returned from pci_map_sg
1119 * because this a count of dma-mappings used to map the use_sg
1120 * pages. They are not guaranteed to be the same for those
1121 * architectures that implement an IOMMU.
1124 nseg
= dma_map_sg(&phba
->pcidev
->dev
, scsi_sglist(scsi_cmnd
),
1125 scsi_sg_count(scsi_cmnd
), datadir
);
1126 if (unlikely(!nseg
))
1129 lpfc_cmd
->seg_cnt
= nseg
;
1130 if (lpfc_cmd
->seg_cnt
> phba
->cfg_sg_seg_cnt
) {
1131 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1132 "9064 BLKGRD: %s: Too many sg segments from "
1133 "dma_map_sg. Config %d, seg_cnt %d\n",
1134 __func__
, phba
->cfg_sg_seg_cnt
,
1136 scsi_dma_unmap(scsi_cmnd
);
1141 * The driver established a maximum scatter-gather segment count
1142 * during probe that limits the number of sg elements in any
1143 * single scsi command. Just run through the seg_cnt and format
1145 * When using SLI-3 the driver will try to fit all the BDEs into
1146 * the IOCB. If it can't then the BDEs get added to a BPL as it
1147 * does for SLI-2 mode.
1149 scsi_for_each_sg(scsi_cmnd
, sgel
, nseg
, num_bde
) {
1150 physaddr
= sg_dma_address(sgel
);
1151 if (phba
->sli_rev
== 3 &&
1152 !(phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
) &&
1153 !(iocbq
->iocb_flag
& DSS_SECURITY_OP
) &&
1154 nseg
<= LPFC_EXT_DATA_BDE_COUNT
) {
1155 data_bde
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1156 data_bde
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
1157 data_bde
->addrLow
= putPaddrLow(physaddr
);
1158 data_bde
->addrHigh
= putPaddrHigh(physaddr
);
1161 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1162 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
1163 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1165 le32_to_cpu(putPaddrLow(physaddr
));
1167 le32_to_cpu(putPaddrHigh(physaddr
));
1174 * Finish initializing those IOCB fields that are dependent on the
1175 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1176 * explicitly reinitialized and for SLI-3 the extended bde count is
1177 * explicitly reinitialized since all iocb memory resources are reused.
1179 if (phba
->sli_rev
== 3 &&
1180 !(phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
) &&
1181 !(iocbq
->iocb_flag
& DSS_SECURITY_OP
)) {
1182 if (num_bde
> LPFC_EXT_DATA_BDE_COUNT
) {
1184 * The extended IOCB format can only fit 3 BDE or a BPL.
1185 * This I/O has more than 3 BDE so the 1st data bde will
1186 * be a BPL that is filled in here.
1188 physaddr
= lpfc_cmd
->dma_handle
;
1189 data_bde
->tus
.f
.bdeFlags
= BUFF_TYPE_BLP_64
;
1190 data_bde
->tus
.f
.bdeSize
= (num_bde
*
1191 sizeof(struct ulp_bde64
));
1192 physaddr
+= (sizeof(struct fcp_cmnd
) +
1193 sizeof(struct fcp_rsp
) +
1194 (2 * sizeof(struct ulp_bde64
)));
1195 data_bde
->addrHigh
= putPaddrHigh(physaddr
);
1196 data_bde
->addrLow
= putPaddrLow(physaddr
);
1197 /* ebde count includes the responce bde and data bpl */
1198 iocb_cmd
->unsli3
.fcp_ext
.ebde_count
= 2;
1200 /* ebde count includes the responce bde and data bdes */
1201 iocb_cmd
->unsli3
.fcp_ext
.ebde_count
= (num_bde
+ 1);
1204 iocb_cmd
->un
.fcpi64
.bdl
.bdeSize
=
1205 ((num_bde
+ 2) * sizeof(struct ulp_bde64
));
1206 iocb_cmd
->unsli3
.fcp_ext
.ebde_count
= (num_bde
+ 1);
1208 fcp_cmnd
->fcpDl
= cpu_to_be32(scsi_bufflen(scsi_cmnd
));
1211 * Due to difference in data length between DIF/non-DIF paths,
1212 * we need to set word 4 of IOCB here
1214 iocb_cmd
->un
.fcpi
.fcpi_parm
= scsi_bufflen(scsi_cmnd
);
1219 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
1220 * @sc: The SCSI command to examine
1221 * @txopt: (out) BlockGuard operation for transmitted data
1222 * @rxopt: (out) BlockGuard operation for received data
1224 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1228 lpfc_sc_to_bg_opcodes(struct lpfc_hba
*phba
, struct scsi_cmnd
*sc
,
1229 uint8_t *txop
, uint8_t *rxop
)
1231 uint8_t guard_type
= scsi_host_get_guard(sc
->device
->host
);
1234 if (guard_type
== SHOST_DIX_GUARD_IP
) {
1235 switch (scsi_get_prot_op(sc
)) {
1236 case SCSI_PROT_READ_INSERT
:
1237 case SCSI_PROT_WRITE_STRIP
:
1238 *txop
= BG_OP_IN_CSUM_OUT_NODIF
;
1239 *rxop
= BG_OP_IN_NODIF_OUT_CSUM
;
1242 case SCSI_PROT_READ_STRIP
:
1243 case SCSI_PROT_WRITE_INSERT
:
1244 *txop
= BG_OP_IN_NODIF_OUT_CRC
;
1245 *rxop
= BG_OP_IN_CRC_OUT_NODIF
;
1248 case SCSI_PROT_READ_PASS
:
1249 case SCSI_PROT_WRITE_PASS
:
1250 *txop
= BG_OP_IN_CSUM_OUT_CRC
;
1251 *rxop
= BG_OP_IN_CRC_OUT_CSUM
;
1254 case SCSI_PROT_NORMAL
:
1256 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1257 "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
1258 scsi_get_prot_op(sc
), guard_type
);
1263 } else if (guard_type
== SHOST_DIX_GUARD_CRC
) {
1264 switch (scsi_get_prot_op(sc
)) {
1265 case SCSI_PROT_READ_STRIP
:
1266 case SCSI_PROT_WRITE_INSERT
:
1267 *txop
= BG_OP_IN_NODIF_OUT_CRC
;
1268 *rxop
= BG_OP_IN_CRC_OUT_NODIF
;
1271 case SCSI_PROT_READ_PASS
:
1272 case SCSI_PROT_WRITE_PASS
:
1273 *txop
= BG_OP_IN_CRC_OUT_CRC
;
1274 *rxop
= BG_OP_IN_CRC_OUT_CRC
;
1277 case SCSI_PROT_READ_INSERT
:
1278 case SCSI_PROT_WRITE_STRIP
:
1279 case SCSI_PROT_NORMAL
:
1281 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1282 "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
1283 scsi_get_prot_op(sc
), guard_type
);
1288 /* unsupported format */
1295 struct scsi_dif_tuple
{
1296 __be16 guard_tag
; /* Checksum */
1297 __be16 app_tag
; /* Opaque storage */
1298 __be32 ref_tag
; /* Target LBA or indirect LBA */
1301 static inline unsigned
1302 lpfc_cmd_blksize(struct scsi_cmnd
*sc
)
1304 return sc
->device
->sector_size
;
1308 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
1309 * @sc: in: SCSI command
1310 * @apptagmask: out: app tag mask
1311 * @apptagval: out: app tag value
1312 * @reftag: out: ref tag (reference tag)
1315 * Extract DIF parameters from the command if possible. Otherwise,
1316 * use default parameters.
1320 lpfc_get_cmd_dif_parms(struct scsi_cmnd
*sc
, uint16_t *apptagmask
,
1321 uint16_t *apptagval
, uint32_t *reftag
)
1323 struct scsi_dif_tuple
*spt
;
1324 unsigned char op
= scsi_get_prot_op(sc
);
1325 unsigned int protcnt
= scsi_prot_sg_count(sc
);
1328 if (protcnt
&& (op
== SCSI_PROT_WRITE_STRIP
||
1329 op
== SCSI_PROT_WRITE_PASS
)) {
1332 spt
= page_address(sg_page(scsi_prot_sglist(sc
))) +
1333 scsi_prot_sglist(sc
)[0].offset
;
1336 *reftag
= cpu_to_be32(spt
->ref_tag
);
1339 /* SBC defines ref tag to be lower 32bits of LBA */
1340 *reftag
= (uint32_t) (0xffffffff & scsi_get_lba(sc
));
1347 * This function sets up buffer list for protection groups of
1348 * type LPFC_PG_TYPE_NO_DIF
1350 * This is usually used when the HBA is instructed to generate
1351 * DIFs and insert them into data stream (or strip DIF from
1352 * incoming data stream)
1354 * The buffer list consists of just one protection group described
1356 * +-------------------------+
1357 * start of prot group --> | PDE_5 |
1358 * +-------------------------+
1360 * +-------------------------+
1362 * +-------------------------+
1363 * |more Data BDE's ... (opt)|
1364 * +-------------------------+
1366 * @sc: pointer to scsi command we're working on
1367 * @bpl: pointer to buffer list for protection groups
1368 * @datacnt: number of segments of data that have been dma mapped
1370 * Note: Data s/g buffers have been dma mapped
1373 lpfc_bg_setup_bpl(struct lpfc_hba
*phba
, struct scsi_cmnd
*sc
,
1374 struct ulp_bde64
*bpl
, int datasegcnt
)
1376 struct scatterlist
*sgde
= NULL
; /* s/g data entry */
1377 struct lpfc_pde5
*pde5
= NULL
;
1378 struct lpfc_pde6
*pde6
= NULL
;
1379 dma_addr_t physaddr
;
1380 int i
= 0, num_bde
= 0, status
;
1381 int datadir
= sc
->sc_data_direction
;
1384 uint16_t apptagmask
, apptagval
;
1387 status
= lpfc_sc_to_bg_opcodes(phba
, sc
, &txop
, &rxop
);
1391 /* extract some info from the scsi command for pde*/
1392 blksize
= lpfc_cmd_blksize(sc
);
1393 lpfc_get_cmd_dif_parms(sc
, &apptagmask
, &apptagval
, &reftag
);
1395 /* setup PDE5 with what we have */
1396 pde5
= (struct lpfc_pde5
*) bpl
;
1397 memset(pde5
, 0, sizeof(struct lpfc_pde5
));
1398 bf_set(pde5_type
, pde5
, LPFC_PDE5_DESCRIPTOR
);
1399 pde5
->reftag
= reftag
;
1401 /* Endianness conversion if necessary for PDE5 */
1402 pde5
->word0
= cpu_to_le32(pde5
->word0
);
1403 pde5
->reftag
= cpu_to_le32(pde5
->reftag
);
1405 /* advance bpl and increment bde count */
1408 pde6
= (struct lpfc_pde6
*) bpl
;
1410 /* setup PDE6 with the rest of the info */
1411 memset(pde6
, 0, sizeof(struct lpfc_pde6
));
1412 bf_set(pde6_type
, pde6
, LPFC_PDE6_DESCRIPTOR
);
1413 bf_set(pde6_optx
, pde6
, txop
);
1414 bf_set(pde6_oprx
, pde6
, rxop
);
1415 if (datadir
== DMA_FROM_DEVICE
) {
1416 bf_set(pde6_ce
, pde6
, 1);
1417 bf_set(pde6_re
, pde6
, 1);
1418 bf_set(pde6_ae
, pde6
, 1);
1420 bf_set(pde6_ai
, pde6
, 1);
1421 bf_set(pde6_apptagval
, pde6
, apptagval
);
1423 /* Endianness conversion if necessary for PDE6 */
1424 pde6
->word0
= cpu_to_le32(pde6
->word0
);
1425 pde6
->word1
= cpu_to_le32(pde6
->word1
);
1426 pde6
->word2
= cpu_to_le32(pde6
->word2
);
1428 /* advance bpl and increment bde count */
1432 /* assumption: caller has already run dma_map_sg on command data */
1433 scsi_for_each_sg(sc
, sgde
, datasegcnt
, i
) {
1434 physaddr
= sg_dma_address(sgde
);
1435 bpl
->addrLow
= le32_to_cpu(putPaddrLow(physaddr
));
1436 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(physaddr
));
1437 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgde
);
1438 if (datadir
== DMA_TO_DEVICE
)
1439 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1441 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
1442 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1452 * This function sets up buffer list for protection groups of
1453 * type LPFC_PG_TYPE_DIF_BUF
1455 * This is usually used when DIFs are in their own buffers,
1456 * separate from the data. The HBA can then by instructed
1457 * to place the DIFs in the outgoing stream. For read operations,
1458 * The HBA could extract the DIFs and place it in DIF buffers.
1460 * The buffer list for this type consists of one or more of the
1461 * protection groups described below:
1462 * +-------------------------+
1463 * start of first prot group --> | PDE_5 |
1464 * +-------------------------+
1466 * +-------------------------+
1467 * | PDE_7 (Prot BDE) |
1468 * +-------------------------+
1470 * +-------------------------+
1471 * |more Data BDE's ... (opt)|
1472 * +-------------------------+
1473 * start of new prot group --> | PDE_5 |
1474 * +-------------------------+
1476 * +-------------------------+
1478 * @sc: pointer to scsi command we're working on
1479 * @bpl: pointer to buffer list for protection groups
1480 * @datacnt: number of segments of data that have been dma mapped
1481 * @protcnt: number of segment of protection data that have been dma mapped
1483 * Note: It is assumed that both data and protection s/g buffers have been
1487 lpfc_bg_setup_bpl_prot(struct lpfc_hba
*phba
, struct scsi_cmnd
*sc
,
1488 struct ulp_bde64
*bpl
, int datacnt
, int protcnt
)
1490 struct scatterlist
*sgde
= NULL
; /* s/g data entry */
1491 struct scatterlist
*sgpe
= NULL
; /* s/g prot entry */
1492 struct lpfc_pde5
*pde5
= NULL
;
1493 struct lpfc_pde6
*pde6
= NULL
;
1494 struct ulp_bde64
*prot_bde
= NULL
;
1495 dma_addr_t dataphysaddr
, protphysaddr
;
1496 unsigned short curr_data
= 0, curr_prot
= 0;
1497 unsigned int split_offset
, protgroup_len
;
1498 unsigned int protgrp_blks
, protgrp_bytes
;
1499 unsigned int remainder
, subtotal
;
1501 int datadir
= sc
->sc_data_direction
;
1502 unsigned char pgdone
= 0, alldone
= 0;
1505 uint16_t apptagmask
, apptagval
;
1509 sgpe
= scsi_prot_sglist(sc
);
1510 sgde
= scsi_sglist(sc
);
1512 if (!sgpe
|| !sgde
) {
1513 lpfc_printf_log(phba
, KERN_ERR
, LOG_FCP
,
1514 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
1519 status
= lpfc_sc_to_bg_opcodes(phba
, sc
, &txop
, &rxop
);
1523 /* extract some info from the scsi command */
1524 blksize
= lpfc_cmd_blksize(sc
);
1525 lpfc_get_cmd_dif_parms(sc
, &apptagmask
, &apptagval
, &reftag
);
1529 /* setup PDE5 with what we have */
1530 pde5
= (struct lpfc_pde5
*) bpl
;
1531 memset(pde5
, 0, sizeof(struct lpfc_pde5
));
1532 bf_set(pde5_type
, pde5
, LPFC_PDE5_DESCRIPTOR
);
1533 pde5
->reftag
= reftag
;
1535 /* Endianness conversion if necessary for PDE5 */
1536 pde5
->word0
= cpu_to_le32(pde5
->word0
);
1537 pde5
->reftag
= cpu_to_le32(pde5
->reftag
);
1539 /* advance bpl and increment bde count */
1542 pde6
= (struct lpfc_pde6
*) bpl
;
1544 /* setup PDE6 with the rest of the info */
1545 memset(pde6
, 0, sizeof(struct lpfc_pde6
));
1546 bf_set(pde6_type
, pde6
, LPFC_PDE6_DESCRIPTOR
);
1547 bf_set(pde6_optx
, pde6
, txop
);
1548 bf_set(pde6_oprx
, pde6
, rxop
);
1549 bf_set(pde6_ce
, pde6
, 1);
1550 bf_set(pde6_re
, pde6
, 1);
1551 bf_set(pde6_ae
, pde6
, 1);
1552 bf_set(pde6_ai
, pde6
, 1);
1553 bf_set(pde6_apptagval
, pde6
, apptagval
);
1555 /* Endianness conversion if necessary for PDE6 */
1556 pde6
->word0
= cpu_to_le32(pde6
->word0
);
1557 pde6
->word1
= cpu_to_le32(pde6
->word1
);
1558 pde6
->word2
= cpu_to_le32(pde6
->word2
);
1560 /* advance bpl and increment bde count */
1564 /* setup the first BDE that points to protection buffer */
1565 prot_bde
= (struct ulp_bde64
*) bpl
;
1566 protphysaddr
= sg_dma_address(sgpe
);
1567 prot_bde
->addrHigh
= le32_to_cpu(putPaddrLow(protphysaddr
));
1568 prot_bde
->addrLow
= le32_to_cpu(putPaddrHigh(protphysaddr
));
1569 protgroup_len
= sg_dma_len(sgpe
);
1571 /* must be integer multiple of the DIF block length */
1572 BUG_ON(protgroup_len
% 8);
1574 protgrp_blks
= protgroup_len
/ 8;
1575 protgrp_bytes
= protgrp_blks
* blksize
;
1577 prot_bde
->tus
.f
.bdeSize
= protgroup_len
;
1578 prot_bde
->tus
.f
.bdeFlags
= LPFC_PDE7_DESCRIPTOR
;
1579 prot_bde
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1584 /* setup BDE's for data blocks associated with DIF data */
1586 subtotal
= 0; /* total bytes processed for current prot grp */
1589 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1590 "9065 BLKGRD:%s Invalid data segment\n",
1595 dataphysaddr
= sg_dma_address(sgde
) + split_offset
;
1596 bpl
->addrLow
= le32_to_cpu(putPaddrLow(dataphysaddr
));
1597 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(dataphysaddr
));
1599 remainder
= sg_dma_len(sgde
) - split_offset
;
1601 if ((subtotal
+ remainder
) <= protgrp_bytes
) {
1602 /* we can use this whole buffer */
1603 bpl
->tus
.f
.bdeSize
= remainder
;
1606 if ((subtotal
+ remainder
) == protgrp_bytes
)
1609 /* must split this buffer with next prot grp */
1610 bpl
->tus
.f
.bdeSize
= protgrp_bytes
- subtotal
;
1611 split_offset
+= bpl
->tus
.f
.bdeSize
;
1614 subtotal
+= bpl
->tus
.f
.bdeSize
;
1616 if (datadir
== DMA_TO_DEVICE
)
1617 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1619 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
1620 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1628 /* Move to the next s/g segment if possible */
1629 sgde
= sg_next(sgde
);
1634 if (curr_prot
== protcnt
) {
1636 } else if (curr_prot
< protcnt
) {
1637 /* advance to next prot buffer */
1638 sgpe
= sg_next(sgpe
);
1641 /* update the reference tag */
1642 reftag
+= protgrp_blks
;
1644 /* if we're here, we have a bug */
1645 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1646 "9054 BLKGRD: bug in %s\n", __func__
);
1656 * Given a SCSI command that supports DIF, determine composition of protection
1657 * groups involved in setting up buffer lists
1660 * for DIF (for both read and write)
1663 lpfc_prot_group_type(struct lpfc_hba
*phba
, struct scsi_cmnd
*sc
)
1665 int ret
= LPFC_PG_TYPE_INVALID
;
1666 unsigned char op
= scsi_get_prot_op(sc
);
1669 case SCSI_PROT_READ_STRIP
:
1670 case SCSI_PROT_WRITE_INSERT
:
1671 ret
= LPFC_PG_TYPE_NO_DIF
;
1673 case SCSI_PROT_READ_INSERT
:
1674 case SCSI_PROT_WRITE_STRIP
:
1675 case SCSI_PROT_READ_PASS
:
1676 case SCSI_PROT_WRITE_PASS
:
1677 ret
= LPFC_PG_TYPE_DIF_BUF
;
1680 lpfc_printf_log(phba
, KERN_ERR
, LOG_FCP
,
1681 "9021 Unsupported protection op:%d\n", op
);
1689 * This is the protection/DIF aware version of
1690 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1691 * two functions eventually, but for now, it's here
1694 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba
*phba
,
1695 struct lpfc_scsi_buf
*lpfc_cmd
)
1697 struct scsi_cmnd
*scsi_cmnd
= lpfc_cmd
->pCmd
;
1698 struct fcp_cmnd
*fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
1699 struct ulp_bde64
*bpl
= lpfc_cmd
->fcp_bpl
;
1700 IOCB_t
*iocb_cmd
= &lpfc_cmd
->cur_iocbq
.iocb
;
1701 uint32_t num_bde
= 0;
1702 int datasegcnt
, protsegcnt
, datadir
= scsi_cmnd
->sc_data_direction
;
1703 int prot_group_type
= 0;
1708 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1709 * fcp_rsp regions to the first data bde entry
1712 if (scsi_sg_count(scsi_cmnd
)) {
1714 * The driver stores the segment count returned from pci_map_sg
1715 * because this a count of dma-mappings used to map the use_sg
1716 * pages. They are not guaranteed to be the same for those
1717 * architectures that implement an IOMMU.
1719 datasegcnt
= dma_map_sg(&phba
->pcidev
->dev
,
1720 scsi_sglist(scsi_cmnd
),
1721 scsi_sg_count(scsi_cmnd
), datadir
);
1722 if (unlikely(!datasegcnt
))
1725 lpfc_cmd
->seg_cnt
= datasegcnt
;
1726 if (lpfc_cmd
->seg_cnt
> phba
->cfg_sg_seg_cnt
) {
1727 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1728 "9067 BLKGRD: %s: Too many sg segments"
1729 " from dma_map_sg. Config %d, seg_cnt"
1731 __func__
, phba
->cfg_sg_seg_cnt
,
1733 scsi_dma_unmap(scsi_cmnd
);
1737 prot_group_type
= lpfc_prot_group_type(phba
, scsi_cmnd
);
1739 switch (prot_group_type
) {
1740 case LPFC_PG_TYPE_NO_DIF
:
1741 num_bde
= lpfc_bg_setup_bpl(phba
, scsi_cmnd
, bpl
,
1743 /* we should have 2 or more entries in buffer list */
1747 case LPFC_PG_TYPE_DIF_BUF
:{
1749 * This type indicates that protection buffers are
1750 * passed to the driver, so that needs to be prepared
1753 protsegcnt
= dma_map_sg(&phba
->pcidev
->dev
,
1754 scsi_prot_sglist(scsi_cmnd
),
1755 scsi_prot_sg_count(scsi_cmnd
), datadir
);
1756 if (unlikely(!protsegcnt
)) {
1757 scsi_dma_unmap(scsi_cmnd
);
1761 lpfc_cmd
->prot_seg_cnt
= protsegcnt
;
1762 if (lpfc_cmd
->prot_seg_cnt
1763 > phba
->cfg_prot_sg_seg_cnt
) {
1764 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1765 "9068 BLKGRD: %s: Too many prot sg "
1766 "segments from dma_map_sg. Config %d,"
1767 "prot_seg_cnt %d\n", __func__
,
1768 phba
->cfg_prot_sg_seg_cnt
,
1769 lpfc_cmd
->prot_seg_cnt
);
1770 dma_unmap_sg(&phba
->pcidev
->dev
,
1771 scsi_prot_sglist(scsi_cmnd
),
1772 scsi_prot_sg_count(scsi_cmnd
),
1774 scsi_dma_unmap(scsi_cmnd
);
1778 num_bde
= lpfc_bg_setup_bpl_prot(phba
, scsi_cmnd
, bpl
,
1779 datasegcnt
, protsegcnt
);
1780 /* we should have 3 or more entries in buffer list */
1785 case LPFC_PG_TYPE_INVALID
:
1787 lpfc_printf_log(phba
, KERN_ERR
, LOG_FCP
,
1788 "9022 Unexpected protection group %i\n",
1795 * Finish initializing those IOCB fields that are dependent on the
1796 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1797 * reinitialized since all iocb memory resources are used many times
1798 * for transmit, receive, and continuation bpl's.
1800 iocb_cmd
->un
.fcpi64
.bdl
.bdeSize
= (2 * sizeof(struct ulp_bde64
));
1801 iocb_cmd
->un
.fcpi64
.bdl
.bdeSize
+= (num_bde
* sizeof(struct ulp_bde64
));
1802 iocb_cmd
->ulpBdeCount
= 1;
1803 iocb_cmd
->ulpLe
= 1;
1805 fcpdl
= scsi_bufflen(scsi_cmnd
);
1807 if (scsi_get_prot_type(scsi_cmnd
) == SCSI_PROT_DIF_TYPE1
) {
1809 * We are in DIF Type 1 mode
1810 * Every data block has a 8 byte DIF (trailer)
1811 * attached to it. Must ajust FCP data length
1813 blksize
= lpfc_cmd_blksize(scsi_cmnd
);
1814 diflen
= (fcpdl
/ blksize
) * 8;
1817 fcp_cmnd
->fcpDl
= be32_to_cpu(fcpdl
);
1820 * Due to difference in data length between DIF/non-DIF paths,
1821 * we need to set word 4 of IOCB here
1823 iocb_cmd
->un
.fcpi
.fcpi_parm
= fcpdl
;
1827 lpfc_printf_log(phba
, KERN_ERR
, LOG_FCP
,
1828 "9023 Could not setup all needed BDE's"
1829 "prot_group_type=%d, num_bde=%d\n",
1830 prot_group_type
, num_bde
);
1835 * This function checks for BlockGuard errors detected by
1836 * the HBA. In case of errors, the ASC/ASCQ fields in the
1837 * sense buffer will be set accordingly, paired with
1838 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1839 * detected corruption.
1842 * 0 - No error found
1843 * 1 - BlockGuard error found
1844 * -1 - Internal error (bad profile, ...etc)
1847 lpfc_parse_bg_err(struct lpfc_hba
*phba
, struct lpfc_scsi_buf
*lpfc_cmd
,
1848 struct lpfc_iocbq
*pIocbOut
)
1850 struct scsi_cmnd
*cmd
= lpfc_cmd
->pCmd
;
1851 struct sli3_bg_fields
*bgf
= &pIocbOut
->iocb
.unsli3
.sli3_bg
;
1853 uint32_t bghm
= bgf
->bghm
;
1854 uint32_t bgstat
= bgf
->bgstat
;
1855 uint64_t failing_sector
= 0;
1857 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
, "9069 BLKGRD: BG ERROR in cmd"
1858 " 0x%x lba 0x%llx blk cnt 0x%x "
1859 "bgstat=0x%x bghm=0x%x\n",
1860 cmd
->cmnd
[0], (unsigned long long)scsi_get_lba(cmd
),
1861 blk_rq_sectors(cmd
->request
), bgstat
, bghm
);
1863 spin_lock(&_dump_buf_lock
);
1864 if (!_dump_buf_done
) {
1865 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
, "9070 BLKGRD: Saving"
1866 " Data for %u blocks to debugfs\n",
1867 (cmd
->cmnd
[7] << 8 | cmd
->cmnd
[8]));
1868 lpfc_debug_save_data(phba
, cmd
);
1870 /* If we have a prot sgl, save the DIF buffer */
1871 if (lpfc_prot_group_type(phba
, cmd
) ==
1872 LPFC_PG_TYPE_DIF_BUF
) {
1873 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
, "9071 BLKGRD: "
1874 "Saving DIF for %u blocks to debugfs\n",
1875 (cmd
->cmnd
[7] << 8 | cmd
->cmnd
[8]));
1876 lpfc_debug_save_dif(phba
, cmd
);
1881 spin_unlock(&_dump_buf_lock
);
1883 if (lpfc_bgs_get_invalid_prof(bgstat
)) {
1884 cmd
->result
= ScsiResult(DID_ERROR
, 0);
1885 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
, "9072 BLKGRD: Invalid"
1886 " BlockGuard profile. bgstat:0x%x\n",
1892 if (lpfc_bgs_get_uninit_dif_block(bgstat
)) {
1893 cmd
->result
= ScsiResult(DID_ERROR
, 0);
1894 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
, "9073 BLKGRD: "
1895 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1901 if (lpfc_bgs_get_guard_err(bgstat
)) {
1904 scsi_build_sense_buffer(1, cmd
->sense_buffer
, ILLEGAL_REQUEST
,
1906 cmd
->result
= DRIVER_SENSE
<< 24
1907 | ScsiResult(DID_ABORT
, SAM_STAT_CHECK_CONDITION
);
1908 phba
->bg_guard_err_cnt
++;
1909 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1910 "9055 BLKGRD: guard_tag error\n");
1913 if (lpfc_bgs_get_reftag_err(bgstat
)) {
1916 scsi_build_sense_buffer(1, cmd
->sense_buffer
, ILLEGAL_REQUEST
,
1918 cmd
->result
= DRIVER_SENSE
<< 24
1919 | ScsiResult(DID_ABORT
, SAM_STAT_CHECK_CONDITION
);
1921 phba
->bg_reftag_err_cnt
++;
1922 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1923 "9056 BLKGRD: ref_tag error\n");
1926 if (lpfc_bgs_get_apptag_err(bgstat
)) {
1929 scsi_build_sense_buffer(1, cmd
->sense_buffer
, ILLEGAL_REQUEST
,
1931 cmd
->result
= DRIVER_SENSE
<< 24
1932 | ScsiResult(DID_ABORT
, SAM_STAT_CHECK_CONDITION
);
1934 phba
->bg_apptag_err_cnt
++;
1935 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1936 "9061 BLKGRD: app_tag error\n");
1939 if (lpfc_bgs_get_hi_water_mark_present(bgstat
)) {
1941 * setup sense data descriptor 0 per SPC-4 as an information
1942 * field, and put the failing LBA in it
1944 cmd
->sense_buffer
[8] = 0; /* Information */
1945 cmd
->sense_buffer
[9] = 0xa; /* Add. length */
1946 bghm
/= cmd
->device
->sector_size
;
1948 failing_sector
= scsi_get_lba(cmd
);
1949 failing_sector
+= bghm
;
1951 put_unaligned_be64(failing_sector
, &cmd
->sense_buffer
[10]);
1955 /* No error was reported - problem in FW? */
1956 cmd
->result
= ScsiResult(DID_ERROR
, 0);
1957 lpfc_printf_log(phba
, KERN_ERR
, LOG_BG
,
1958 "9057 BLKGRD: no errors reported!\n");
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 **/
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_offset = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))

		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command. Just run through the seg_cnt and format
		 *
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
		}
	} else {
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
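/*
 * Sketch (assumption, not taken from this file): putPaddrLow()/putPaddrHigh()
 * simply split the 64-bit DMA address into the two 32-bit halves an SLI-4
 * SGE carries, roughly equivalent to:
 */
#if 0	/* example only */
	dma_addr_t paddr = sg_dma_address(sgel);

	sgl->addr_lo = cpu_to_le32((uint32_t)(paddr & 0xffffffff)); /* putPaddrLow()  */
	sgl->addr_hi = cpu_to_le32((uint32_t)(paddr >> 32));	    /* putPaddrHigh() */
#endif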
/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 **/
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
		((scsi_status == SAM_STAT_GOOD) &&
		!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	}

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @lpfc_cmd for device with SLI-3 interface spec.
 **/
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider. (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg. There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 */
	if (fcpcmd->fcpCntl2) {
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
				 cmnd->device->lun, cmnd->cmnd[0],
			host_status = DID_ERROR;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->lun, cmnd->cmnd[0],
			host_status = DID_ERROR;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command. Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	struct scsi_device *tmp_sdev;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9030 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
			}

			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
						"9031 non-zero BGSTAT "
						"on unprotected cmd\n");
				}
			}

			/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		    time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	lpfc_rampup_queue_depth(vport, queue_depth);

	/*
	 * Check for queue full. If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		shost_for_each_device(tmp_sdev, shost) {
			if (tmp_sdev->id != scsi_id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
						      tmp_sdev->queue_depth-1);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
			i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
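/*
 * Usage sketch with a hypothetical destination buffer (not taken from this
 * file): the loop above copies struct fcp_cmnd one 32-bit word at a time
 * through cpu_to_be32(), so the payload is big-endian on the wire regardless
 * of host byte order.
 */
#if 0	/* example only */
	struct fcp_cmnd wire_cmd;

	lpfc_fcpcmd_to_iocb((uint8_t *)&wire_cmd, lpfc_cmd->fcp_cmnd);
	/* wire_cmd now holds the FCP_CMND IU in big-endian byte order */
#endif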
/**
 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			if (phba->sli_rev < LPFC_SLI_REV4) {
				iocb_cmd->un.fcpi.fcpi_parm = 0;
				iocb_cmd->ulpPU = 0;
			} else
				iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 **/
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
}
/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * Returns: 0 - success, -ENODEV - failure.
 **/
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}
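/*
 * Sketch of how the jump table set up above is consumed (the SLI-3/SLI-4
 * difference stays behind the per-HBA function pointers); the wrapper
 * lpfc_scsi_prep_dma_buf() earlier in this file dispatches the same way.
 */
#if 0	/* example only */
	if (lpfc_scsi_api_table_setup(phba, dev_grp))
		return -ENODEV;

	phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);	/* _s3 or _s4 variant */
	phba->lpfc_release_scsi_buf(phba, lpfc_cmd);
#endif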
/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with lpfc_cmd.
 **/
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 *    Pointer to char - Success.
 **/
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli4_hba.link_state.logical_speed) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " Logical Link Speed: %d Mbps",
				 phba->sli4_hba.link_state.logical_speed * 10);
		}
	}
	return lpfcinfobuf;
}
/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
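/*
 * Worked example with the documented default (assumed value, not read from
 * the configuration here): cfg_poll_tmo = 10 ms, so the timer is pushed 10 ms
 * into the future, and only while FCP commands remain on the txcmpl queue.
 */
#if 0	/* example only */
	unsigned long expires = jiffies + msecs_to_jiffies(10);

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer, expires);
#endif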
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The @done callback is invoked after driver finished processing the command.
 *
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	err = fc_remote_port_chkready(rport);
		goto out_fail_command;
	ndlp = rdata->pnode;

	if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
		scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
				"%02x %02x %02x %02x %02x\n",
				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
			if (cmnd->cmnd[0] == READ_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9035 BLKGRD: READ @ sector %llu, "
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request));
			else if (cmnd->cmnd[0] == WRITE_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9036 BLKGRD: WRITE @ sector %llu, "
					"count %u cmd=%p\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request),
					cmnd);
		}

		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9038 BLKGRD: rcvd unprotected cmd:"
					"%02x op:%02x str=%s\n",
					cmnd->cmnd[0], scsi_get_prot_op(cmnd),
					dif_op_str[scsi_get_prot_op(cmnd)]);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9039 BLKGRD: CDB: %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x %02x\n",
					cmnd->cmnd[0], cmnd->cmnd[1],
					cmnd->cmnd[2], cmnd->cmnd[3],
					cmnd->cmnd[4], cmnd->cmnd[5],
					cmnd->cmnd[6], cmnd->cmnd[7],
					cmnd->cmnd[8], cmnd->cmnd[9]);
			if (cmnd->cmnd[0] == READ_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9040 dbg: READ @ sector %llu, "
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request));
			else if (cmnd->cmnd[0] == WRITE_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9041 dbg: WRITE @ sector %llu, "
					"count %u cmd=%p\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request), cmnd);
			else
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9042 dbg: parser not implemented\n");
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_unlock(shost->host_lock);
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		spin_lock(shost->host_lock);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static DEF_SCSI_QCMD(lpfc_queuecommand)
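/*
 * DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the locked
 * queuecommand entry point registered in the host templates below.
 * Conceptually it expands to roughly the wrapper sketched here (simplified,
 * not the exact macro body):
 */
#if 0	/* example only */
int lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long irq_flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	rc = lpfc_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return rc;
}
#endif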
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 **/
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	ret = fc_block_scsi_eh(cmnd);

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
	}

	/*
	 * The scsi command cannot be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
		lpfc_sli_release_iocbq(phba, abtsiocb);
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
}
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	}
}
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 **/
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		    unsigned  tgt_id, unsigned int lun_id,
		    uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	} else if (status == IOCB_BUSY)

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_chk_tgt_mapped -
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 **/
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);

	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
}
/**
 * lpfc_reset_flush_io_context -
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target contect - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 **/
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba   *phba = vport->phba;
	unsigned long later;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
}
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 **/
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;

		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);

	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 *
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 **/
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;

		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);

	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 *
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 **/
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	ret = fc_block_scsi_eh(cmnd);

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
		}
		spin_unlock_irq(shost->host_lock);

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
		}
	}
	/*
	 * We have to clean up i/o as : they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
}
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffer is not allocated more than HBA limit conveyed to midlayer. This list
 * of scsi buffer exists for the lifetime of the driver.
 **/
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;

	if (!rport || fc_remote_port_chkready(rport))

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed. "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
}
/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures following items
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 **/
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
}
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
};