/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
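
/*
 * Look up the lpfc_rport_data for a scsi_device. When the adapter's
 * fof feature is configured, sdev->hostdata holds a lpfc_device_data
 * wrapper rather than the rport_data itself.
 */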
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
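
/* Logical block size, in bytes, of the device a command is bound for. */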
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
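
/*
 * Flags for lpfc_cmd_protect(): whether guard-tag or ref-tag checking
 * should be enabled for a command. The stub below always answers yes;
 * the HBA performs the actual checking.
 */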
#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
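
/*
 * Returns 1 when the upper layer supplies IP-checksum guard tags (DIX),
 * in which case the driver must program the HBA to convert between the
 * IP checksum and the T10 CRC on the wire.
 */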
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}
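
	/*
	 * Latency histogram: LPFC_LINEAR_BUCKET uses evenly spaced bins,
	 * otherwise the bins grow as powers of two from bucket_base.
	 */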
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or firmware.
 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process it.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
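				/*
				 * Scale the device queue depth down in
				 * proportion to the share of commands that
				 * failed for lack of resources, but always
				 * drop it by at least 1.
				 */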
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth =
						sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete(). This function is invoked with EEH
 * when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					    GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
							unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
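			/*
			 * Otherwise the IOCB carries a single BLP_64 BDE
			 * pointing at the two-entry BPL (FCP_CMD + FCP_RSP)
			 * built above.
			 */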
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
				return;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;
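
			/*
			 * An aborted XRI cannot be reused until its RRQ
			 * window expires; record it as active for this
			 * node before releasing the buffer.
			 */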
			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->flags &= ~LPFC_SBUF_XBUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
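		/*
		 * The get list is empty: splice over everything that has
		 * been freed to the put list and retry the removal once.
		 */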
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @hdwq io_buf_list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;
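
	/*
	 * Pick a hardware queue: either the block layer's hw queue for this
	 * request, or the hdwq mapped to the current CPU.
	 */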
	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
 * io_buf_list list. For SLI4, XRIs are tied to the scsi buffer and cannot
 * be reused for at least RA_TOV amount of time if the buffer was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB. If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
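
/*
 * BlockGuard error-injection support, available only when the debug
 * filesystem hooks are built in. The BG_ERR_* flags tell the callers
 * how an injected error must be surfaced.
 */
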
/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		   uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
		    (phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			    sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}
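
	/*
	 * Each section below arms one tag type. The write counters inject
	 * errors on outbound data, the read counters on inbound data; a
	 * counter reaching zero disarms that injection point.
	 */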
	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		      uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
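
/*
 * Under guard-tag error injection the opcodes below deliberately swap
 * CRC and IP-checksum relative to lpfc_sc_to_bg_opcodes(), so the
 * computed guard value will not match and the hardware flags an error.
 */
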
/**
 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		    uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		  struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		       struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
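
		/* Each 8-byte DIF tuple protects one logical block of data. */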
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"9065 BLKGRD:%s Invalid data segment\n",
					__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		  struct sli4_sge *sgl, int datasegcnt,
		  struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	int j = 0;
	bool lsp_just_set = false;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	sgde = scsi_sglist(sc);
	j = 3;
	for (i = 0; i < datasegcnt; i++) {
		/* clear it */
		sgl->word2 = 0;
2041 /* do we need to expand the segment */
2042 if (!lsp_just_set
&& !((j
+ 1) % phba
->border_sge_num
) &&
2043 ((datasegcnt
- 1) != i
)) {
2045 bf_set(lpfc_sli4_sge_type
, sgl
, LPFC_SGE_TYPE_LSP
);
2047 sgl_xtra
= lpfc_get_sgl_per_hdwq(phba
, lpfc_cmd
);
2049 if (unlikely(!sgl_xtra
)) {
2050 lpfc_cmd
->seg_cnt
= 0;
2053 sgl
->addr_lo
= cpu_to_le32(putPaddrLow(
2054 sgl_xtra
->dma_phys_sgl
));
2055 sgl
->addr_hi
= cpu_to_le32(putPaddrHigh(
2056 sgl_xtra
->dma_phys_sgl
));
2059 bf_set(lpfc_sli4_sge_type
, sgl
, LPFC_SGE_TYPE_DATA
);
2062 if (!(bf_get(lpfc_sli4_sge_type
, sgl
) & LPFC_SGE_TYPE_LSP
)) {
2063 if ((datasegcnt
- 1) == i
)
2064 bf_set(lpfc_sli4_sge_last
, sgl
, 1);
2065 physaddr
= sg_dma_address(sgde
);
2066 dma_len
= sg_dma_len(sgde
);
2067 sgl
->addr_lo
= cpu_to_le32(putPaddrLow(physaddr
));
2068 sgl
->addr_hi
= cpu_to_le32(putPaddrHigh(physaddr
));
2070 bf_set(lpfc_sli4_sge_offset
, sgl
, dma_offset
);
2071 sgl
->word2
= cpu_to_le32(sgl
->word2
);
2072 sgl
->sge_len
= cpu_to_le32(dma_len
);
2074 dma_offset
+= dma_len
;
2075 sgde
= sg_next(sgde
);
2079 lsp_just_set
= false;
2082 sgl
->word2
= cpu_to_le32(sgl
->word2
);
2083 sgl
->sge_len
= cpu_to_le32(phba
->cfg_sg_dma_buf_size
);
2085 sgl
= (struct sli4_sge
*)sgl_xtra
->dma_sgl
;
2088 lsp_just_set
= true;
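/*
 * Editor's note -- illustrative only, assuming border_sge_num is 64
 * entries for the SGL page size in use: the (j + 1) % border_sge_num
 * test above reserves the last slot of each SGL page for an LSP
 * (link) SGE, e.g. at j = 63 the code chains to a fresh sgl_xtra
 * page from lpfc_get_sgl_per_hdwq() and continues laying down data
 * SGEs there, so no SGL page ever ends without an explicit link.
 */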
/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up the SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF.
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA.
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0, j = 2;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
		    !(phba->cfg_xpsgl))
			return num_sge + 3;

		/* DISEED and DIF have to be together */
		if (!((j + 1) % phba->border_sge_num) ||
		    !((j + 2) % phba->border_sge_num) ||
		    !((j + 3) % phba->border_sge_num)) {
			sgl->word2 = 0;

			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				goto out;
			} else {
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						sgl_xtra->dma_phys_sgl));
			}

			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			j = 0;
		}

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;
		sgl++;
		j++;

		/* setup the first SGE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */

		sgl++;
		j++;

		while (!pgdone) {
			/* Check to see if we ran out of space */
			if ((num_sge >= phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}

			if (!((j + 1) % phba->border_sge_num)) {
				sgl->word2 = 0;

				/* set LSP type */
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_LSP);

				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);

				if (unlikely(!sgl_xtra)) {
					goto out;
				} else {
					sgl->addr_lo = cpu_to_le32(
					  putPaddrLow(sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(
					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
				}

				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			} else {
				dataphysaddr = sg_dma_address(sgde) +
					       split_offset;

				remainder = sg_dma_len(sgde) - split_offset;

				if ((subtotal + remainder) <= protgrp_bytes) {
					/* we can use this whole buffer */
					dma_len = remainder;
					split_offset = 0;

					if ((subtotal + remainder) ==
					    protgrp_bytes)
						pgdone = 1;
				} else {
					/* must split this buffer with next
					 * prot grp
					 */
					dma_len = protgrp_bytes - subtotal;
					split_offset += dma_len;
				}

				subtotal += dma_len;

				sgl->word2 = 0;
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						dataphysaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						dataphysaddr));
				bf_set(lpfc_sli4_sge_last, sgl, 0);
				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);

				sgl->sge_len = cpu_to_le32(dma_len);
				dma_offset += dma_len;

				num_sge++;
				curr_data++;

				if (split_offset) {
					sgl++;
					j++;
					break;
				}

				/* Move to the next s/g segment if possible */
				sgde = sg_next(sgde);

				sgl++;
			}

			j++;
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			/* mark the last SGL */
			sgl--;
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:
	return num_sge;
}
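/*
 * Editor's note -- worked example, not driver code: the 4K check
 * above keeps a DIF SGE from crossing a 4 KiB boundary. If a
 * protection buffer starts at physical offset 0xf80 with
 * protgroup_len 0x100:
 *
 *	(0xf80 & 0xfff) + 0x100 = 0x1080 > 0x1000
 *	protgroup_remainder = 0x1000 - 0xf80 = 0x80	(16 tuples)
 *
 * so this DISEED/DIF pair covers only 16 blocks, and the remaining
 * 0x80 bytes of tuples are consumed on the next pass through the
 * loop via protgroup_offset.
 */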
/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}
/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
			return fcpdl;
	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. We must adjust the FCP data
	 * length to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;

	return fcpdl;
}
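/*
 * Editor's note -- worked example, not driver code: for a 32768-byte
 * transfer on a 512-byte-block device with DIF on the wire,
 *
 *	fcpdl = 32768 + (32768 / 512) * 8 = 33280
 *
 * i.e. each of the 64 blocks gains an 8-byte DIF trailer.
 */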
/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 *
 * RETURNS 0 - SUCCESS,
 *         1 - Failed DMA map, retry.
 *         2 - Invalid scsi cmd or prot-type. Do not retry.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;
	int ret = 1;
	struct lpfc_vport *vport = phba->pport;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			ret = 2;
			goto err;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) >
			    phba->cfg_total_seg_cnt) {
				ret = 2;
				goto err;
			}

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2)) {
				ret = 2;
				goto err;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt)) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 2;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * For First burst, we may need to adjust the initial transfer
	 * length for DIF
	 */
	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
	    (fcpdl < vport->cfg_first_burst_size))
		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9023 Cannot setup S/G List for HBA "
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return ret;
}
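/*
 * Editor's note -- worked example, not driver code: the bdeSize set
 * above counts the fcp_cmnd/fcp_rsp BDEs plus the data BDEs, so with
 * num_bde = 6:
 *
 *	bdeSize = (2 + 6) * sizeof(struct ulp_bde64) = 8 * 12 = 96
 *
 * since struct ulp_bde64 is two address words plus one type/length
 * word.
 */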
/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * provided by crc_t10dif.
 */
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * provided by ip_compute_csum.
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}
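/*
 * Editor's sketch -- illustrative only, assuming a 512-byte block in
 * buf[]: the two guard flavors differ only in the kernel helper used,
 *
 *	guard_crc  = cpu_to_be16(crc_t10dif(buf, 512));
 *	guard_csum = ip_compute_csum(buf, 512);
 *
 * lpfc_cmd_guard_csum() tells lpfc_calc_bg_err() below which flavor
 * the initiator used, so it recomputes the matching one.
 */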
/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
/*
 * This function checks for BlockGuard errors detected by
 * the HBA. In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int ret = 0;
	u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
	u32 bghm = 0;
	u32 bgstat = 0;
	u64 failing_sector = 0;

	if (status == CQE_STATUS_DI_ERROR) {
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			bgstat |= BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
			bgstat |= BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
			bgstat |= BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
			bghm = wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!bgstat)
			bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				   BGS_GUARD_ERR_MASK);
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9059 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9060 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9062 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				 sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9068 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
	return ret;
}
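/*
 * Editor's note -- worked example, not driver code: bghm counts bytes
 * placed on the wire before the error. For a READ_PASS at LBA 0x1000
 * with 512-byte sectors, each wire block is 512 + 8 = 520 bytes, so
 * bghm = 0x1040 (eight wire blocks) gives
 *
 *	bghm /= (512 + 8);			-> 8
 *	failing_sector = 0x1000 + 8 = 0x1008
 *
 * which is the LBA placed in sense descriptor 0 above.
 */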
/*
 * This function checks for BlockGuard errors detected by
 * the HBA. In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		  struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				 sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	2 - Error - Do not retry
 *	1 - Error - Retry
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	struct sli4_sge *first_data_sgl;
	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
	struct lpfc_vport *vport = phba->pport;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	struct ulp_bde64 *bde;
	bool lsp_just_set = false;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (!phba->cfg_xpsgl &&
		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9074 BLKGRD:"
					" %s: Too many sg segments from "
					"dma_map_sg. Config %d, seg_cnt %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command. Just run through the seg_cnt
		 * and format the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB. If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */

		/* for tracking segment boundaries */
		sgel = scsi_sglist(scsi_cmnd);
		j = 2;
		for (i = 0; i < nseg; i++) {
			sgl->word2 = 0;
			if ((num_bde + 1) == nseg) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* do we need to expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_cmd->seg_cnt = 0;
						scsi_dma_unmap(scsi_cmnd);
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(sgel);
				dma_len = sg_dma_len(sgel);
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						physaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				sgel = sg_next(sgel);

				sgl++;
				lsp_just_set = false;

			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IOCB/WQE.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/* Word 11 */
	if (phba->cfg_enable_pbde)
		bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	/* Set first-burst provided it was successfully negotiated */
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    vport->cfg_first_burst_size &&
	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
		u32 init_len, total_len;

		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
		init_len = min(total_len, vport->cfg_first_burst_size);

		/* Word 4 & 5 */
		wqe->fcp_iwrite.initial_xfer_len = init_len;
		wqe->fcp_iwrite.total_xfer_len = total_len;
	} else {
		/* Word 4 */
		wqe->fcp_iwrite.total_xfer_len =
			be32_to_cpu(fcp_cmnd->fcpDl);
	}

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
	    scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;

		/* Word 10 */
		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);

		if (lpfc_cmd->cur_iocbq.priority)
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (lpfc_cmd->cur_iocbq.priority << 1));
		else
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (phba->cfg_XLanePriority << 1));
	}

	return 0;
}
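/*
 * Editor's note -- worked example, not driver code: with a negotiated
 * cfg_first_burst_size of 4096 and a 65536-byte write,
 *
 *	total_len = 65536;
 *	init_len  = min(65536, 4096) = 4096;
 *
 * so the WQE asks the target for transfer-ready after the first 4 KiB
 * burst rather than before any data moves.
 */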
/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 *
 * Return codes:
 *	2 - Error - Do not retry
 *	1 - Error - Retry
 *	0 - Success
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;
	int ret = 1;
	struct lpfc_vport *vport = phba->pport;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
	 * fcp_rsp regions to the first data sge entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
		    !phba->cfg_xpsgl) {
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			ret = 2;
			goto err;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if (((lpfc_cmd->seg_cnt + 1) >
			     phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl) {
				ret = 2;
				goto err;
			}

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt, lpfc_cmd);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
			if (((lpfc_cmd->prot_seg_cnt * 3) >
			     (phba->cfg_total_seg_cnt - 2)) &&
			    !phba->cfg_xpsgl) {
				ret = 2;
				goto err;
			}

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt, lpfc_cmd);

			/* we should have 3 or more entries in buffer list */
			if (num_sge < 3 ||
			    (num_sge > phba->cfg_total_seg_cnt &&
			     !phba->cfg_xpsgl)) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 2;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/* Set first-burst provided it was successfully negotiated */
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    vport->cfg_first_burst_size &&
	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
		u32 init_len, total_len;

		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
		init_len = min(total_len, vport->cfg_first_burst_size);

		/* Word 4 & 5 */
		wqe->fcp_iwrite.initial_xfer_len = init_len;
		wqe->fcp_iwrite.total_xfer_len = total_len;
	} else {
		/* Word 4 */
		wqe->fcp_iwrite.total_xfer_len =
			be32_to_cpu(fcp_cmnd->fcpDl);
	}

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
	    scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

		/* Word 10 */
		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccp, &wqe->generic.wqe_com,
		       (phba->cfg_XLanePriority << 1));
	}

	/* Word 7. DIF Flags */
	if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);

	lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
				 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9084 Cannot setup S/G List for HBA "
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return ret;
}
/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
 * buffer
 * @vport: Pointer to vport object.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 * @tmo: Timeout value for IO
 *
 * This routine initializes IOCB/WQE data structure from scsi command
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
			uint8_t tmo)
{
	return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
}
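/*
 * Editor's note -- a sketch of the dispatch these wrappers rely on,
 * assuming the function pointers are bound once at attach time when
 * the SLI revision is known (as done elsewhere in this driver):
 *
 *	phba->lpfc_scsi_prep_dma_buf = (sli_rev == LPFC_SLI_REV4)
 *		? lpfc_scsi_prep_dma_buf_s4
 *		: lpfc_scsi_prep_dma_buf_s3;
 *
 * so the per-command hot path never branches on the SLI revision.
 */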
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @fcpi_parm: FCP initiator parameter.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode)
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
		       &pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
		       &pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		   ((cmnd->cmnd[0] == READ_10) ||
		    (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
		       &pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
		       &pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		   fcpi_parm &&
		   ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
		    ((scsi_status == SAM_STAT_GOOD) &&
		     !(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param
		 * and there is valid fcpi_parm, then there is a read_check
		 * error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
		       &pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
		       &pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else {
		return;
	}

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @lpfc_cmd for device with SLI-3 interface spec.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	/*
	 * There are only two special cases to consider. (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg. There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
			     scsi_prot_sg_count(psb->pCmd),
			     psb->pCmd->sc_data_direction);
}
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @fcpi_parm: FCP initiator parameter.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    uint32_t fcpi_parm)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2719 Invalid response length: "
					 "tgt x%x lun x%llx cmnd x%x rsplen "
					 "x%x\n", cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2757 Protocol failure detected during "
					 "processing of FCP I/O op: "
					 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
					 cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);

		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command. Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				  "Read" : "Write"),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic. Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

out:
	cmnd->result = host_status << 16 | scsi_status;
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
}
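/*
 * Editor's note -- worked example, not driver code: for an 8192-byte
 * read where the target reports RESID_UNDER with rspResId = 1024 but
 * the HBA's fcpi_parm says 2048 bytes were short, the two residuals
 * disagree:
 *
 *	scsi_get_resid(cmnd) = 1024 != fcpi_parm = 2048
 *
 * so the routine above treats it as a dropped frame, forces the
 * residual to the full scsi_bufflen() and fails the command with
 * DID_ERROR instead of completing short data as good.
 */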
/**
 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
 * @phba: The hba for which this call is being executed.
 * @pwqeIn: The command WQE for the scsi cmnd.
 * @wcqe: The response WQE for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response WQE
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			 struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	u32 logit = LOG_FCP;
	u32 status, idx;
	unsigned long iflags = 0;

	/* Sanity check on return of outstanding command */
	if (!lpfc_cmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "9032 Null lpfc_cmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	rdata = lpfc_cmd->rdata;
	ndlp = rdata->pnode;

	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		/* TOREMOVE - currently this flag is checked during
		 * the release of lpfc_iocbq. Remove once we move
		 * to lpfc_wqe_job construct.
		 *
		 * This needs to be done outside buf_lock
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_cmd->buf_lock);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd || !phba) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "9042 I/O completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
	shost = cmd->device->host;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
	lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (unlikely(lpfc_cmd->status)) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9034 FCP cmd x%x failed <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (ndlp) ? ndlp->nlp_DID : 0,
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 wcqe->parameter, wcqe->total_data_placed,
				 lpfc_cmd->cur_iocbq.iotag);
	}

	switch (lpfc_cmd->status) {
	case IOSTAT_SUCCESS:
		cmd->result = DID_OK << 16;
		break;
	case IOSTAT_FCP_RSP_ERROR:
		lpfc_handle_fcp_err(vport, lpfc_cmd,
				    pwqeIn->wqe.fcp_iread.total_xfer_len -
				    wcqe->total_data_placed);
		break;
	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			break;
		fast_path_evt->un.fabric_evt.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.fabric_evt.subcategory =
			(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
			LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
		if (ndlp) {
			memcpy(&fast_path_evt->un.fabric_evt.wwpn,
			       &ndlp->nlp_portname,
			       sizeof(struct lpfc_name));
			memcpy(&fast_path_evt->un.fabric_evt.wwnn,
			       &ndlp->nlp_nodename,
			       sizeof(struct lpfc_name));
		}
		fast_path_evt->vport = vport;
		fast_path_evt->work_evt.evt =
			LPFC_EVT_FASTPATH_MGMT_EVT;
		spin_lock_irqsave(&phba->hbalock, flags);
		list_add_tail(&fast_path_evt->work_evt.evt_listp,
			      &phba->work_list);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_worker_wake_up(phba);
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9035 Fabric/Node busy FCP cmd x%x failed"
				 " <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (ndlp) ? ndlp->nlp_DID : 0,
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 wcqe->parameter,
				 wcqe->total_data_placed,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
		break;
	case IOSTAT_REMOTE_STOP:
		if (ndlp) {
			/* This I/O was aborted by the target, we don't
			 * know the rxid and because we did not send the
			 * ABTS we cannot generate an RRQ.
			 */
			lpfc_set_rrq_active(phba, ndlp,
					    lpfc_cmd->cur_iocbq.sli4_lxritag,
					    0, 0);
		}
		fallthrough;
	case IOSTAT_LOCAL_REJECT:
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
		    lpfc_cmd->result ==
		    IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
		    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
		    lpfc_cmd->result ==
		    IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
			cmd->result = DID_NO_CONNECT << 16;
			break;
		}
		if (lpfc_cmd->result == IOERR_INVALID_RPI ||
		    lpfc_cmd->result == IOERR_NO_RESOURCES ||
		    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
		    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
			cmd->result = DID_REQUEUE << 16;
			break;
		}
		if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
		     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
		    status == CQE_STATUS_DI_ERROR) {
			if (scsi_get_prot_op(cmd) !=
			    SCSI_PROT_NORMAL) {
				/*
				 * This is a response for a BG enabled
				 * cmd. Parse BG error
				 */
				lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
						       wcqe);
				break;
			}
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				 "9040 non-zero BGSTAT on unprotected cmd\n");
		}
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9036 Local Reject FCP cmd x%x failed"
				 " <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (ndlp) ? ndlp->nlp_DID : 0,
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 wcqe->parameter,
				 wcqe->total_data_placed,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
		fallthrough;
	default:
		if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		cmd->result = DID_ERROR << 16;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "9037 FCP Completion Error: xri %x "
				 "status x%x result x%x [x%x] "
				 "placed x%x\n",
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 lpfc_cmd->status, lpfc_cmd->result,
				 wcqe->parameter,
				 wcqe->total_data_placed);
	}
	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		u32 *lp = (u32 *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "9039 Iodone <%d/%llu> cmd x%p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(vport, lpfc_cmd);

	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
	    msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (ndlp) {
			if (ndlp->cmd_qdepth >
			    atomic_read(&ndlp->cmd_pending) &&
			    (atomic_read(&ndlp->cmd_pending) >
			     LPFC_MIN_TGT_QDEPTH) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10))
				ndlp->cmd_qdepth =
					atomic_read(&ndlp->cmd_pending);

			ndlp->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->ts_cmd_start) {
		lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
		lpfc_cmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_cmd);
	}
#endif
	lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is an abort thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	int idx;
	uint32_t logit = LOG_FCP;

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_cmd->buf_lock);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd || !phba) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2621 IO completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		return;
	}

	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (unlikely(lpfc_cmd->status)) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9030 FCP cmd x%x failed <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (pnode) ? pnode->nlp_DID : 0,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd,
					    pIocbOut->iocb.un.fcpi.fcpi_parm);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
				       &pnode->nlp_portname,
				       sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
				       &pnode->nlp_nodename,
				       sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				      &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = DID_NO_CONNECT << 16;
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = DID_REQUEUE << 16;
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							  pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							 LOG_BG,
							 "9031 non-zero BGSTAT "
							 "on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
			    && (phba->sli_rev == LPFC_SLI_REV4)
			    && pnode) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			fallthrough;
		default:
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
				      SAM_STAT_BUSY;
	} else
		cmd->result = DID_OK << 16;

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd x%px, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(vport, lpfc_cmd);
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
	    msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode) {
			if (pnode->cmd_qdepth >
			    atomic_read(&pnode->cmd_pending) &&
			    (atomic_read(&pnode->cmd_pending) >
			     LPFC_MIN_TGT_QDEPTH) &&
			    ((cmd->cmnd[0] == READ_10) ||
			     (cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->ts_cmd_start) {
		lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
		lpfc_cmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_cmd);
	}
#endif

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is an abort thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
 * @vport: Pointer to vport object for which I/O is executed
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 * @tmo: timeout value for the IO
 *
 * Based on the data-direction of the command, initialize IOCB
 * in the I/O buffer. Fill in the IOCB fields which are independent
 * of the scsi buffer
 *
 * RETURNS 0 - SUCCESS,
 **/
static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
				      struct lpfc_io_buf *lpfc_cmd,
				      uint8_t tmo)
{
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
	int datadir = scsi_cmnd->sc_data_direction;
	u32 fcpdl;

	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				u32 xrdy_len;

				fcpdl = scsi_bufflen(scsi_cmnd);
				xrdy_len = min(fcpdl,
					       vport->cfg_first_burst_size);
				piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	if (!piocbq->iocb_cmpl)
		piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = tmo;
	piocbq->vport = vport;
	return 0;
}
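
/*
 * First-burst example (illustrative values, not from the original source):
 * for an 8192-byte write to a first-burst capable target with
 * cfg_first_burst_size set to 4096, the min() above yields
 * fcpi_XRdy = 4096, i.e. the initiator may send the first 4 KiB of data
 * before receiving a XFER_RDY from the target.
 */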
/**
 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
 * @vport: Pointer to vport object for which I/O is executed
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 * @tmo: timeout value for the IO
 *
 * Based on the data-direction of the command copy WQE template
 * to I/O buffer WQE. Fill in the WQE fields which are independent
 * of the scsi buffer
 *
 * RETURNS 0 - SUCCESS,
 **/
static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
				      struct lpfc_io_buf *lpfc_cmd,
				      uint8_t tmo)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct lpfc_sli4_hdw_queue *hdwq = NULL;
	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	u16 idx = lpfc_cmd->hdwq_no;
	int datadir = scsi_cmnd->sc_data_direction;

	hdwq = &phba->sli4_hba.hdwq[idx];

	/* Initialize 64 bytes only */
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			if (hdwq)
				hdwq->scsi_cstat.output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 7 */
			bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);

			fcp_cmnd->fcpCntl3 = READ_DATA;
			if (hdwq)
				hdwq->scsi_cstat.input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Word 7 */
		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);

		fcp_cmnd->fcpCntl3 = 0;
		if (hdwq)
			hdwq->scsi_cstat.control_requests++;
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);

	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	pwqeq->vport = vport;
	pwqeq->context1 = lpfc_cmd;
	pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
	pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;

	return 0;
}
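
/*
 * Illustrative note (not from the original source): the memcpy()s above
 * copy only the template words that are invariant per command type
 * (words 7-11 for iread/iwrite, words 4-11 for icmnd); the per-IO fields
 * (context tag, XRI, request tag, timeout) are then written with bf_set()
 * after the copy, so the hot path is a single memset plus a handful of
 * field updates rather than building the whole WQE field by field.
 */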
/**
 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static int
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	u8 *ptr;

	if (!pnode)
		return 0;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);

	return 0;
}
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_io_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
			vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}
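
/*
 * Dispatch sketch (illustrative, not from the original source): once this
 * table is populated, the rest of the driver reaches the SLI-3 or SLI-4
 * implementation through the phba function pointers and never checks the
 * SLI revision itself, e.g.:
 *
 *	lpfc_cmd = phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
 *	err = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *	phba->lpfc_release_scsi_buf(phba, lpfc_cmd);
 */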
/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is IOCB completion routine for device reset and target reset
 * routine. This routine release scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}
/**
 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
 *			       if issuing a pci_bus_reset is possibly unsafe
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * Walks the bus_list to ensure only PCI devices with Emulex
 * vendor id, device ids that support hot reset, and only one occurrence
 * of function 0.
 *
 * Returns:
 *	-EBADSLT, detected invalid device
 *	0, successful
 */
int
lpfc_check_pci_resettable(struct lpfc_hba *phba)
{
	const struct pci_dev *pdev = phba->pcidev;
	struct pci_dev *ptr = NULL;
	u8 counter = 0;

	/* Walk the list of devices on the pci_dev's bus */
	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
		/* Check for Emulex Vendor ID */
		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"8346 Non-Emulex vendor found: "
					"0x%04x\n", ptr->vendor);
			return -EBADSLT;
		}

		/* Check for valid Emulex Device ID */
		switch (ptr->device) {
		case PCI_DEVICE_ID_LANCER_FC:
		case PCI_DEVICE_ID_LANCER_G6_FC:
		case PCI_DEVICE_ID_LANCER_G7_FC:
			break;
		default:
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"8347 Invalid device found: "
					"0x%04x\n", ptr->device);
			return -EBADSLT;
		}

		/* Check for only one function 0 ID to ensure only one HBA on
		 * secondary bus
		 */
		if (ptr->devfn == 0) {
			if (++counter > 1) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"8348 More than one device on "
						"secondary bus found\n");
				return -EBADSLT;
			}
		}
	}

	return 0;
}
/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int link_speed = 0;
	static char lpfcinfobuf[384];
	char tmp[384] = {0};

	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
	if (phba && phba->pcidev) {
		/* Model Description */
		scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
		    sizeof(lpfcinfobuf))
			goto buffer_done;

		/* PCI Info */
		scnprintf(tmp, sizeof(tmp),
			  " on PCI bus %02x device %02x irq %d",
			  phba->pcidev->bus->number, phba->pcidev->devfn,
			  phba->pcidev->irq);
		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
		    sizeof(lpfcinfobuf))
			goto buffer_done;

		/* Port Number */
		if (phba->Port[0]) {
			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
			    sizeof(lpfcinfobuf))
				goto buffer_done;
		}

		/* Link Speed */
		link_speed = lpfc_sli_port_speed_get(phba);
		if (link_speed != 0) {
			scnprintf(tmp, sizeof(tmp),
				  " Logical Link Speed: %d Mbps", link_speed);
			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
			    sizeof(lpfcinfobuf))
				goto buffer_done;
		}

		/* PCI resettable */
		if (!lpfc_check_pci_resettable(phba)) {
			scnprintf(tmp, sizeof(tmp), " PCI resettable");
			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
		}
	}

buffer_done:
	return lpfcinfobuf;
}
/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/
void lpfc_poll_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
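
/*
 * Polling cycle sketch (illustrative, not from the original source),
 * SLI-3 only: with ENABLE_FCP_RING_POLLING set, the timer fires,
 * completions are reaped via lpfc_sli_handle_fast_ring_event(), and the
 * timer is re-armed only while the txcmplq still holds outstanding
 * commands, so an idle ring stops generating timer traffic.
 */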
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: kernel scsi host pointer.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The cmnd->scsi_done callback is invoked after the driver finishes
 * processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0L;

	if (phba->ktime_on)
		start = ktime_get_ns();
#endif

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
	    (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp)
		goto out_tgt_busy;
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
					 "3377 Target Queue Full, scsi Id:%d "
					 "Qdepth:%d Pending command:%d"
					 " WWNN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x, "
					 " WWPN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x",
					 ndlp->nlp_sid, ndlp->cmd_qdepth,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->nlp_nodename.u.wwn[0],
					 ndlp->nlp_nodename.u.wwn[1],
					 ndlp->nlp_nodename.u.wwn[2],
					 ndlp->nlp_nodename.u.wwn[3],
					 ndlp->nlp_nodename.u.wwn[4],
					 ndlp->nlp_nodename.u.wwn[5],
					 ndlp->nlp_nodename.u.wwn[6],
					 ndlp->nlp_nodename.u.wwn[7],
					 ndlp->nlp_portname.u.wwn[0],
					 ndlp->nlp_portname.u.wwn[1],
					 ndlp->nlp_portname.u.wwn[2],
					 ndlp->nlp_portname.u.wwn[3],
					 ndlp->nlp_portname.u.wwn[4],
					 ndlp->nlp_portname.u.wwn[5],
					 ndlp->nlp_portname.u.wwn[6],
					 ndlp->nlp_portname.u.wwn[7]);
			goto out_tgt_busy;
		}
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->ndlp = ndlp;
	lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
	if (err)
		goto out_host_busy_release_buf;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (unlikely(err)) {
		if (err == 2) {
			cmnd->result = DID_ERROR << 16;
			goto out_fail_command_release_buf;
		}
		goto out_host_busy_free_buf;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
	/* Issue I/O to adapter */
	err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
				    &lpfc_cmd->cur_iocbq,
				    SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_cmd->ts_cmd_start = start;
		lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
		lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
	} else {
		lpfc_cmd->ts_cmd_start = 0;
	}
#endif
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x "
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64)-1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iotag,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 bf_get(wqe_tmo,
				 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (cmnd->request->timeout / 1000));

		goto out_host_busy_free_buf;
	}

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);

	return 0;

 out_host_busy_free_buf:
	idx = lpfc_cmd->hdwq_no;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	if (phba->sli4_hba.hdwq) {
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
			break;
		case READ_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
			break;
		default:
			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
		}
	}
 out_host_busy_release_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command_release_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
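
/*
 * Error-path summary (illustrative, not from the original source):
 * out_fail_command completes the command immediately via scsi_done();
 * out_tgt_busy returns SCSI_MLQUEUE_TARGET_BUSY so the midlayer retries
 * only this target; the out_host_busy variants return
 * SCSI_MLQUEUE_HOST_BUSY after undoing whatever setup (DMA mapping,
 * buffer allocation) had already been done.
 */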
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_io_buf *lpfc_cmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4 = NULL;
	struct lpfc_sli_ring *pring = NULL;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return ret;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		ret = FAILED;
		goto out_unlock;
	}

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_cmd->buf_lock);

	if (!lpfc_cmd->pCmd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		goto out_unlock_buf;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
		if (!pring_s4) {
			ret = FAILED;
			goto out_unlock_buf;
		}
		spin_lock(&pring_s4->ring_lock);
	}
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		ret = FAILED;
		goto out_unlock_ring;
	}

	/*
	 * If pCmd field of the corresponding lpfc_io_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock_ring;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock(&lpfc_cmd->buf_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	lpfc_cmd->waitq = &waitq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		spin_unlock(&pring_s4->ring_lock);
		ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
						      lpfc_sli4_abort_fcp_cmpl);
	} else {
		pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
						     lpfc_sli_abort_fcp_cmpl);
	}

	if (ret_val != IOCB_SUCCESS) {
		/* Indicate the IO is not being aborted by the driver. */
		lpfc_cmd->waitq = NULL;
		spin_unlock(&lpfc_cmd->buf_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		ret = FAILED;
		goto out;
	}

	/* no longer need the lock after this point */
	spin_unlock(&lpfc_cmd->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	/*
	 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
	 * for abort to complete.
	 */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock(&lpfc_cmd->buf_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}

	lpfc_cmd->waitq = NULL;

	spin_unlock(&lpfc_cmd->buf_lock);
	goto out;

out_unlock_ring:
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring_s4->ring_lock);
out_unlock_buf:
	spin_unlock(&lpfc_cmd->buf_lock);
out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
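
/*
 * Locking note (illustrative, not from the original source): the abort
 * path above acquires hbalock, then buf_lock, then (SLI-4 only) the
 * ring_lock. The completion routines take buf_lock as well, which is
 * what keeps lpfc_fcp_io_cmd_wqe_cmpl() from completing and recycling
 * the buffer while the abort is being issued.
 */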
static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}
/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the tsk mgmt command
 * succeeded.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t  rsp_info_code;
	int ret = FAILED;


	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;


		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

		/* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
		 * field specifies the number of valid bytes of FCP_RSP_INFO.
		 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
		 */
		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
		    ((rsp_len == 8) || (rsp_len == 4))) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						 "reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						 "failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						 "LUN\n");
				break;
			}
		}
	}
	return ret;
}
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode)
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->ndlp = pnode;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					      task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
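
/*
 * Buffer lifetime note (illustrative, not from the original source): on
 * IOCB_TIMEDOUT the lpfc_io_buf is deliberately not released here.
 * iocbq->iocb_cmpl was set to lpfc_tskmgmt_def_cmpl above, so the buffer
 * is freed when the late completion finally arrives, which avoids a
 * use-after-free on a timed-out TMF.
 */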
/**
 * lpfc_chk_tgt_mapped -
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%px\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode)
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}
/**
 * lpfc_reset_flush_io_context -
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			    uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_taskmgmt(vport,
					&phba->sli.sli3_ring[LPFC_FCP_RING],
					tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0798 Device Reset rdata failure: rdata x%px\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0721 Device Reset rport failure: rdata x%px\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
				    FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * Thus, clean up all i/o on this target/lun.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_LUN);

	return status;
}
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0799 Target Reset rdata failure: rdata x%px\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0722 Target Reset rport failure: rdata x%px\n", rdata);
		if (pnode) {
			spin_lock_irq(&pnode->lock);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irq(&pnode->lock);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					    LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
				    FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * Thus, clean up all i/o on this target/lun.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_TGT);
	return status;
}
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {

			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as : they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does host reset to the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * lpfc_offline calls lpfc_sli_hba_down, which will abort and locally
 * reject all outstanding SCSI commands to the host, with the errors returned
 * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
 * of error handling, it will only return error if resetting of the adapter
 * is not successful; in all other cases it will return success.
 *
 * Return code:
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "3172 SCSI layer issued Host Reset Data:\n");

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		goto error;

	rc = lpfc_online(phba);
	if (rc)
		goto error;

	lpfc_unblock_mgmt_io(phba);

	return ret;
error:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "3323 Failed host reset\n");
	lpfc_unblock_mgmt_io(phba);
	return FAILED;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffers are not allocated beyond the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {
		/*
		 * Check to see if the device data structure for the lun
		 * exists. If not, create one.
		 */
		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/* For SLI4, all IO buffers are pre-allocated */
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;

	/* This code path is now ONLY for SLI3 adapters */

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets @sdev's hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @pri: Priority
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		   GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), the state of OAS,
 * whether or not the corresponding lun is available to the system,
 * and a pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data *
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			uint32_t pri, bool atomic_create)
{
	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */
	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->priority = pri;
	lun_info->available = false;
	return lun_info;
}

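/*
 * Illustrative sketch, not driver code: choosing @atomic_create.  A caller
 * that cannot sleep (for example one already holding phba->devicelock, as
 * lpfc_enable_oas_lun() below does) must pass true so the mempool allocation
 * uses GFP_ATOMIC; a sleepable caller may pass false and allow GFP_KERNEL.
 * The helper name and the zero lun are placeholders.
 */
static struct lpfc_device_data * __maybe_unused
lpfc_example_create_sleepable(struct lpfc_hba *phba,
			      struct lpfc_name *vport_wwpn,
			      struct lpfc_name *target_wwpn)
{
	/* Process context with no spinlocks held: atomic_create = false */
	return lpfc_create_device_data(phba, vport_wwpn, target_wwpn, 0,
				       phba->cfg_XLanePriority, false);
}
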
/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{
	if (unlikely(!phba) || !lun_info ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
}

/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to the list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks; it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data *
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{
	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */
	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}

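/*
 * Illustrative sketch, not driver code: because __lpfc_get_device_data()
 * takes no locks itself, a lookup against phba->luns must be bracketed by
 * phba->devicelock, exactly as the OAS enable/disable paths below do.  The
 * helper name is hypothetical.
 */
static bool __maybe_unused
lpfc_example_lun_is_oas(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun)
{
	struct lpfc_device_data *lun_info;
	unsigned long flags;
	bool oas = false;

	spin_lock_irqsave(&phba->devicelock, flags);
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info)
		oas = lun_info->oas_enabled;
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return oas;
}
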
/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to priority of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target.  If the vport wwpn contains
 * a zero value, then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match.  If the
 * target wwpn contains a zero value, then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned.  The function will also return the next lun if available.
 * If the next lun is not found, the starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   true - A matching lun was found.
 *   false - No matching lun was found.
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status,
		       uint32_t *found_lun_pri)
{
	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for the lun or the lun closest in value */
	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				*found_lun_pri = lun_info->priority;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}

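/*
 * Illustrative sketch, not driver code: walking every enabled OAS lun with
 * lpfc_find_next_oas_lun().  Starting from FIND_FIRST_OAS_LUN, each call
 * returns one lun and primes @starting_lun for the next; iteration ends when
 * @starting_lun comes back as NO_MORE_OAS_LUN.  The helper name is
 * hypothetical, and the zeroed wwpns rely on the "match any vport/target"
 * semantics described in the kernel-doc above.
 */
static void __maybe_unused
lpfc_example_walk_oas_luns(struct lpfc_hba *phba)
{
	struct lpfc_name vport_wwpn, target_wwpn;
	struct lpfc_name found_vport_wwpn, found_target_wwpn;
	uint64_t starting_lun = FIND_FIRST_OAS_LUN;
	uint64_t found_lun;
	uint32_t found_lun_status, found_lun_pri;

	/* Zeroed wwpns mean "match any vport" and "match any target" */
	memset(&vport_wwpn, 0, sizeof(vport_wwpn));
	memset(&target_wwpn, 0, sizeof(target_wwpn));

	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
				      &starting_lun, &found_vport_wwpn,
				      &found_target_wwpn, &found_lun,
				      &found_lun_status, &found_lun_pri)) {
		/* found_lun, its wwpns, status and priority are valid here */
		if (starting_lun == NO_MORE_OAS_LUN)
			break;	/* no further lun was queued up */
	}
}
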
/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine enables a lun for oas operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *      indicates the lun is not available, and adds it to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{
	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create a lun info structure and add it to the list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, true);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->priority = pri;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine disables a lun for oas operations.  The routine does so by
 * doing the following:
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available to the system, the device data is
 *      freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{
	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn, target_wwpn,
					  lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		lun_info->priority = pri;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

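/*
 * Illustrative sketch, not driver code: using the enable/disable pair above.
 * Enabling an already-enabled lun simply leaves oas_enabled set and
 * refreshes the priority, while disabling frees the device data only when
 * the lun is no longer available to the system.  The helper name and
 * priority value are placeholders.
 */
static void __maybe_unused
lpfc_example_toggle_oas(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun)
{
	/* Enable at priority 0; safe to repeat */
	if (!lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, lun, 0))
		return;
	/* ... issue OAS-tagged I/O here ... */
	lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun, 0);
}
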
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}

static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}

struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME "_nvme",
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler	= lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFFFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler	= lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFFFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};