/* drivers/scsi/lpfc/lpfc_scsi.c */
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
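
/*
 * Guard selection in brief: when the midlayer hands us DIX protection
 * data guarded by an IP checksum (SHOST_DIX_GUARD_IP), the host side
 * carries checksums and the HBA converts to/from T10 CRC on the wire;
 * otherwise both sides use CRC. The BG_OP_* opcode tables further down
 * encode exactly this split.
 */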
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base) /
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
			if (latency <= (phba->bucket_base +
				((1 << i) * phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
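
/*
 * Worked example of the linear bucketing above: with bucket_base 0 and
 * bucket_step 50 (ms), a 120 ms completion indexes bucket
 * (120 + 50 - 1 - 0) / 50 = 3, i.e. the (100, 150] ms bucket. The
 * power-of-two branch instead grows the bucket boundary as
 * base + (1 << i) * step.
 */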
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts at most one WORKER_RAMP_DOWN_QUEUE event per second
 * for @phba and wakes up the worker thread of @phba to process it.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for every scsi device on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
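				/*
				 * Example: queue_depth 32 with 1 resource
				 * error against 3 successes trims by
				 * 32 * 1 / 4 = 8, leaving a depth of 24.
				 */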
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for a device with SLI-3 interface
 * spec; the scsi buffer contains all the necessary information needed to
 * initiate a SCSI I/O. The non-DMAable buffer region contains information
 * to build the IOCB. The DMAable region contains memory for the FCP CMND,
 * FCP RSP, and the initial BPL. In addition to allocating memory, the FCP
 * CMND and FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in
 * the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					    GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);
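
		/*
		 * Layout of the single DMA allocation established above:
		 *
		 *   psb->data:  [ fcp_cmnd ][ fcp_rsp ][ BPL entries ... ]
		 *
		 * so the three physical addresses are simple offsets from
		 * psb->dma_handle.
		 */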
		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
							       unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
				(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
				putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
				putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
				return;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
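	/*
	 * The aborted XRI was not waiting on the abort list: fall back to a
	 * linear scan of the active iotag table for a matching FCP I/O and
	 * simply clear its exchange-busy flag.
	 */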
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->flags &= ~LPFC_SBUF_XBUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
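	/*
	 * Pick a hardware queue: when I/Os are scheduled by hardware queue,
	 * derive it from the blk-mq hctx of the request; otherwise use the
	 * per-CPU mapping set up at attach time.
	 */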
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}
/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list list. For SLI4, an XRI is tied to the scsi buffer, so
 * the buffer cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}
/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
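
/*
 * Note on the helper above: cpu_to_be32() makes the copy a per-word byte
 * swap on little-endian hosts and a plain copy on big-endian ones, since
 * the FCP_CMND payload must be big endian on the wire.
 */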
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
 * scans through the sg elements and formats the bdes. This routine also
 * initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20
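
/*
 * These are OR-able flags; e.g. BG_ERR_TGT | BG_ERR_CHECK below means the
 * injected corruption should travel to the target with local checking
 * disabled, so the remote end is the one to detect it.
 */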
/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		   uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
		    (phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}
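
	/*
	 * Example of the LBA match above: with lpfc_injerr_lba = 105 and an
	 * 8-block I/O starting at lba 100, the request matches and blockoff
	 * becomes 5, i.e. the corruption lands 5 protection tuples into the
	 * first protection segment.
	 */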
	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			    sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
						"9076 BLKGRD: Injecting reftag error: "
						"write lba x%lx + x%x oldrefTag x%x\n",
						(unsigned long)lba, blockoff,
						be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}
	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
						"9080 BLKGRD: Injecting apptag error: "
						"write lba x%lx + x%x oldappTag x%x\n",
						(unsigned long)lba, blockoff,
						be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}
	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	return rc;
}
#endif
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		      uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
				scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
				scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	}

	return ret;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		    uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIF from
 * the incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->    |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		  struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
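
	/*
	 * At this point the BPL holds PDE5 + PDE6; each mapped data segment
	 * adds one BDE below. Example: a 3-segment non-DIF write returns
	 * num_bde = 5 (2 PDEs + 3 data BDEs).
	 */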
	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		       struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;
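
		/*
		 * If the 4K split above fired, protgroup_offset is now the
		 * number of protection bytes already consumed from this s/g
		 * entry; the next loop iteration re-points PDE7 at the
		 * remainder rather than advancing to the next prot segment.
		 */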
1844 /* setup BDE's for data blocks associated with DIF data */
1845 pgdone = 0;
1846 subtotal = 0; /* total bytes processed for current prot grp */
1847 while (!pgdone) {
1848 /* Check to see if we ran out of space */
1849 if (num_bde >= phba->cfg_total_seg_cnt)
1850 return num_bde + 1;
1852 if (!sgde) {
1853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1854 "9065 BLKGRD:%s Invalid data segment\n",
1855 __func__);
1856 return 0;
1858 bpl++;
1859 dataphysaddr = sg_dma_address(sgde) + split_offset;
1860 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1861 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1863 remainder = sg_dma_len(sgde) - split_offset;
1865 if ((subtotal + remainder) <= protgrp_bytes) {
1866 /* we can use this whole buffer */
1867 bpl->tus.f.bdeSize = remainder;
1868 split_offset = 0;
1870 if ((subtotal + remainder) == protgrp_bytes)
1871 pgdone = 1;
1872 } else {
1873 /* must split this buffer with next prot grp */
1874 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1875 split_offset += bpl->tus.f.bdeSize;
1878 subtotal += bpl->tus.f.bdeSize;
1880 if (datadir == DMA_TO_DEVICE)
1881 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1882 else
1883 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1884 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1886 num_bde++;
1887 curr_data++;
1889 if (split_offset)
1890 break;
1892 /* Move to the next s/g segment if possible */
1893 sgde = sg_next(sgde);
1897 if (protgroup_offset) {
1898 /* update the reference tag */
1899 reftag += protgrp_blks;
1900 bpl++;
1901 continue;
1904 /* are we done ? */
1905 if (curr_prot == protcnt) {
1906 alldone = 1;
1907 } else if (curr_prot < protcnt) {
1908 /* advance to next prot buffer */
1909 sgpe = sg_next(sgpe);
1910 bpl++;
1912 /* update the reference tag */
1913 reftag += protgrp_blks;
1914 } else {
1915 /* if we're here, we have a bug */
1916 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1917 "9054 BLKGRD: bug in %s\n", __func__);
1920 } while (!alldone);
1921 out:
1923 return num_bde;
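/*
 * Illustrative sketch, not driver code: each T10 DIF tuple in a
 * protection segment is 8 bytes and guards exactly one logical block
 * of blksize bytes. The loop above derives the data span of a
 * protection group, and the reference-tag advance, from that ratio.
 */
static inline unsigned int
example_prot_group_bytes(unsigned int protgroup_len, unsigned int blksize)
{
	unsigned int protgrp_blks = protgroup_len / 8;	/* one tuple per block */

	/* data bytes guarded; afterwards reftag advances by protgrp_blks */
	return protgrp_blks * blksize;
}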
1927 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1928 * @phba: The Hba for which this call is being executed.
1929 * @sc: pointer to scsi command we're working on
1930 * @sgl: pointer to buffer list for protection groups
1931 * @datasegcnt: number of segments of data that have been dma mapped
1932 * @lpfc_cmd: lpfc scsi command object pointer.
1934 * This function sets up SGL buffer list for protection groups of
1935 * type LPFC_PG_TYPE_NO_DIF
1937 * This is usually used when the HBA is instructed to generate
1938 * DIFs and insert them into the data stream (or strip DIFs from
1939 * the incoming data stream).
1941 * The buffer list consists of just one protection group described
1942 * below:
1943 * +-------------------------+
1944 * start of prot group --> | DI_SEED |
1945 * +-------------------------+
1946 * | Data SGE |
1947 * +-------------------------+
1948 * |more Data SGE's ... (opt)|
1949 * +-------------------------+
1952 * Note: Data s/g buffers have been dma mapped
1954 * Returns the number of SGEs added to the SGL.
1956 static int
1957 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1958 struct sli4_sge *sgl, int datasegcnt,
1959 struct lpfc_io_buf *lpfc_cmd)
1961 struct scatterlist *sgde = NULL; /* s/g data entry */
1962 struct sli4_sge_diseed *diseed = NULL;
1963 dma_addr_t physaddr;
1964 int i = 0, num_sge = 0, status;
1965 uint32_t reftag;
1966 uint8_t txop, rxop;
1967 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1968 uint32_t rc;
1969 #endif
1970 uint32_t checking = 1;
1971 uint32_t dma_len;
1972 uint32_t dma_offset = 0;
1973 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1974 int j;
1975 bool lsp_just_set = false;
1977 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1978 if (status)
1979 goto out;
1981 /* extract some info from the scsi command for pde */
1982 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1984 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1985 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1986 if (rc) {
1987 if (rc & BG_ERR_SWAP)
1988 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1989 if (rc & BG_ERR_CHECK)
1990 checking = 0;
1992 #endif
1994 /* setup DISEED with what we have */
1995 diseed = (struct sli4_sge_diseed *) sgl;
1996 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1997 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1999 /* Endianness conversion if necessary */
2000 diseed->ref_tag = cpu_to_le32(reftag);
2001 diseed->ref_tag_tran = diseed->ref_tag;
2004 * We only need to check the data on READs; for WRITEs,
2005 * protection data is automatically generated, not checked.
2007 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2008 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2009 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2010 else
2011 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2013 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2014 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2015 else
2016 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2019 /* setup DISEED with the rest of the info */
2020 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2021 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2023 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2024 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2026 /* Endianness conversion if necessary for DISEED */
2027 diseed->word2 = cpu_to_le32(diseed->word2);
2028 diseed->word3 = cpu_to_le32(diseed->word3);
2030 /* advance sgl and increment sge count */
2031 num_sge++;
2032 sgl++;
2034 /* assumption: caller has already run dma_map_sg on command data */
2035 sgde = scsi_sglist(sc);
2036 j = 3;
2037 for (i = 0; i < datasegcnt; i++) {
2038 /* clear it */
2039 sgl->word2 = 0;
2041 /* do we need to expand the segment */
2042 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2043 ((datasegcnt - 1) != i)) {
2044 /* set LSP type */
2045 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2047 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2049 if (unlikely(!sgl_xtra)) {
2050 lpfc_cmd->seg_cnt = 0;
2051 return 0;
2053 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2054 sgl_xtra->dma_phys_sgl));
2055 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2056 sgl_xtra->dma_phys_sgl));
2058 } else {
2059 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2062 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2063 if ((datasegcnt - 1) == i)
2064 bf_set(lpfc_sli4_sge_last, sgl, 1);
2065 physaddr = sg_dma_address(sgde);
2066 dma_len = sg_dma_len(sgde);
2067 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2068 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2070 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2071 sgl->word2 = cpu_to_le32(sgl->word2);
2072 sgl->sge_len = cpu_to_le32(dma_len);
2074 dma_offset += dma_len;
2075 sgde = sg_next(sgde);
2077 sgl++;
2078 num_sge++;
2079 lsp_just_set = false;
2081 } else {
2082 sgl->word2 = cpu_to_le32(sgl->word2);
2083 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2085 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2086 i = i - 1;
2088 lsp_just_set = true;
2091 j++;
2095 out:
2096 return num_sge;
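/*
 * Illustrative sketch, assuming no LSP chaining is needed: the NO_DIF
 * layout is a single DISEED followed by one SGE per mapped data
 * segment, so the count returned above reduces to datasegcnt + 1.
 */
static inline int example_no_dif_sge_count(int datasegcnt)
{
	return 1 + datasegcnt;	/* DISEED + data SGEs */
}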
2100 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2101 * @phba: The Hba for which this call is being executed.
2102 * @sc: pointer to scsi command we're working on
2103 * @sgl: pointer to buffer list for protection groups
2104 * @datacnt: number of segments of data that have been dma mapped
2105 * @protcnt: number of segments of protection data that have been dma mapped
2106 * @lpfc_cmd: lpfc scsi command object pointer.
2108 * This function sets up SGL buffer list for protection groups of
2109 * type LPFC_PG_TYPE_DIF
2111 * This is usually used when DIFs are in their own buffers,
2112 * separate from the data. The HBA can then be instructed
2113 * to place the DIFs in the outgoing stream. For read operations,
2114 * the HBA can extract the DIFs and place them in DIF buffers.
2116 * The buffer list for this type consists of one or more of the
2117 * protection groups described below:
2118 * +-------------------------+
2119 * start of first prot group --> | DISEED |
2120 * +-------------------------+
2121 * | DIF (Prot SGE) |
2122 * +-------------------------+
2123 * | Data SGE |
2124 * +-------------------------+
2125 * |more Data SGE's ... (opt)|
2126 * +-------------------------+
2127 * start of new prot group --> | DISEED |
2128 * +-------------------------+
2129 * | ... |
2130 * +-------------------------+
2132 * Note: It is assumed that both data and protection s/g buffers have been
2133 * mapped for DMA
2135 * Returns the number of SGEs added to the SGL.
2137 static int
2138 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2139 struct sli4_sge *sgl, int datacnt, int protcnt,
2140 struct lpfc_io_buf *lpfc_cmd)
2142 struct scatterlist *sgde = NULL; /* s/g data entry */
2143 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2144 struct sli4_sge_diseed *diseed = NULL;
2145 dma_addr_t dataphysaddr, protphysaddr;
2146 unsigned short curr_data = 0, curr_prot = 0;
2147 unsigned int split_offset;
2148 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2149 unsigned int protgrp_blks, protgrp_bytes;
2150 unsigned int remainder, subtotal;
2151 int status;
2152 unsigned char pgdone = 0, alldone = 0;
2153 unsigned blksize;
2154 uint32_t reftag;
2155 uint8_t txop, rxop;
2156 uint32_t dma_len;
2157 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2158 uint32_t rc;
2159 #endif
2160 uint32_t checking = 1;
2161 uint32_t dma_offset = 0;
2162 int num_sge = 0, j = 2;
2163 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2165 sgpe = scsi_prot_sglist(sc);
2166 sgde = scsi_sglist(sc);
2168 if (!sgpe || !sgde) {
2169 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2170 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2171 sgpe, sgde);
2172 return 0;
2175 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2176 if (status)
2177 goto out;
2179 /* extract some info from the scsi command */
2180 blksize = lpfc_cmd_blksize(sc);
2181 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2183 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2184 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2185 if (rc) {
2186 if (rc & BG_ERR_SWAP)
2187 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2188 if (rc & BG_ERR_CHECK)
2189 checking = 0;
2191 #endif
2193 split_offset = 0;
2194 do {
2195 /* Check to see if we ran out of space */
2196 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2197 !(phba->cfg_xpsgl))
2198 return num_sge + 3;
2200 /* DISEED and DIF have to be together */
2201 if (!((j + 1) % phba->border_sge_num) ||
2202 !((j + 2) % phba->border_sge_num) ||
2203 !((j + 3) % phba->border_sge_num)) {
2204 sgl->word2 = 0;
2206 /* set LSP type */
2207 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2209 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2211 if (unlikely(!sgl_xtra)) {
2212 goto out;
2213 } else {
2214 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2215 sgl_xtra->dma_phys_sgl));
2216 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2217 sgl_xtra->dma_phys_sgl));
2220 sgl->word2 = cpu_to_le32(sgl->word2);
2221 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2223 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2224 j = 0;
2227 /* setup DISEED with what we have */
2228 diseed = (struct sli4_sge_diseed *) sgl;
2229 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2230 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2232 /* Endianness conversion if necessary */
2233 diseed->ref_tag = cpu_to_le32(reftag);
2234 diseed->ref_tag_tran = diseed->ref_tag;
2236 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2237 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2239 } else {
2240 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2242 * When in this mode, the hardware will replace
2243 * the guard tag from the host with a
2244 * newly generated good CRC for the wire.
2245 * Switch to raw mode here to avoid this
2246 * behavior. What the host sends gets put on the wire.
2248 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2249 txop = BG_OP_RAW_MODE;
2250 rxop = BG_OP_RAW_MODE;
2255 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2256 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2257 else
2258 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2260 /* setup DISEED with the rest of the info */
2261 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2262 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2264 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2265 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2267 /* Endianness conversion if necessary for DISEED */
2268 diseed->word2 = cpu_to_le32(diseed->word2);
2269 diseed->word3 = cpu_to_le32(diseed->word3);
2271 /* advance sgl and increment sge count */
2272 num_sge++;
2274 sgl++;
2275 j++;
2277 /* setup the first BDE that points to protection buffer */
2278 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2279 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2281 /* must be an integer multiple of the DIF block length */
2282 BUG_ON(protgroup_len % 8);
2284 /* Now setup DIF SGE */
2285 sgl->word2 = 0;
2286 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2287 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2288 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2289 sgl->word2 = cpu_to_le32(sgl->word2);
2290 sgl->sge_len = 0;
2292 protgrp_blks = protgroup_len / 8;
2293 protgrp_bytes = protgrp_blks * blksize;
2295 /* check if DIF SGE is crossing the 4K boundary; if so split */
2296 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2297 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2298 protgroup_offset += protgroup_remainder;
2299 protgrp_blks = protgroup_remainder / 8;
2300 protgrp_bytes = protgrp_blks * blksize;
2301 } else {
2302 protgroup_offset = 0;
2303 curr_prot++;
2306 num_sge++;
2308 /* setup SGE's for data blocks associated with DIF data */
2309 pgdone = 0;
2310 subtotal = 0; /* total bytes processed for current prot grp */
2312 sgl++;
2313 j++;
2315 while (!pgdone) {
2316 /* Check to see if we ran out of space */
2317 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2318 !phba->cfg_xpsgl)
2319 return num_sge + 1;
2321 if (!sgde) {
2322 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2323 "9086 BLKGRD:%s Invalid data segment\n",
2324 __func__);
2325 return 0;
2328 if (!((j + 1) % phba->border_sge_num)) {
2329 sgl->word2 = 0;
2331 /* set LSP type */
2332 bf_set(lpfc_sli4_sge_type, sgl,
2333 LPFC_SGE_TYPE_LSP);
2335 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2336 lpfc_cmd);
2338 if (unlikely(!sgl_xtra)) {
2339 goto out;
2340 } else {
2341 sgl->addr_lo = cpu_to_le32(
2342 putPaddrLow(sgl_xtra->dma_phys_sgl));
2343 sgl->addr_hi = cpu_to_le32(
2344 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2347 sgl->word2 = cpu_to_le32(sgl->word2);
2348 sgl->sge_len = cpu_to_le32(
2349 phba->cfg_sg_dma_buf_size);
2351 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2352 } else {
2353 dataphysaddr = sg_dma_address(sgde) +
2354 split_offset;
2356 remainder = sg_dma_len(sgde) - split_offset;
2358 if ((subtotal + remainder) <= protgrp_bytes) {
2359 /* we can use this whole buffer */
2360 dma_len = remainder;
2361 split_offset = 0;
2363 if ((subtotal + remainder) ==
2364 protgrp_bytes)
2365 pgdone = 1;
2366 } else {
2367 /* must split this buffer with next
2368 * prot grp
2370 dma_len = protgrp_bytes - subtotal;
2371 split_offset += dma_len;
2374 subtotal += dma_len;
2376 sgl->word2 = 0;
2377 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2378 dataphysaddr));
2379 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2380 dataphysaddr));
2381 bf_set(lpfc_sli4_sge_last, sgl, 0);
2382 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2383 bf_set(lpfc_sli4_sge_type, sgl,
2384 LPFC_SGE_TYPE_DATA);
2386 sgl->sge_len = cpu_to_le32(dma_len);
2387 dma_offset += dma_len;
2389 num_sge++;
2390 curr_data++;
2392 if (split_offset) {
2393 sgl++;
2394 j++;
2395 break;
2398 /* Move to the next s/g segment if possible */
2399 sgde = sg_next(sgde);
2401 sgl++;
2404 j++;
2407 if (protgroup_offset) {
2408 /* update the reference tag */
2409 reftag += protgrp_blks;
2410 continue;
2413 /* are we done ? */
2414 if (curr_prot == protcnt) {
2415 /* mark the last SGL */
2416 sgl--;
2417 bf_set(lpfc_sli4_sge_last, sgl, 1);
2418 alldone = 1;
2419 } else if (curr_prot < protcnt) {
2420 /* advance to next prot buffer */
2421 sgpe = sg_next(sgpe);
2423 /* update the reference tag */
2424 reftag += protgrp_blks;
2425 } else {
2426 /* if we're here, we have a bug */
2427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2428 "9085 BLKGRD: bug in %s\n", __func__);
2431 } while (!alldone);
2433 out:
2435 return num_sge;
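/*
 * Illustrative sketch of the 4K split rule used above: a DIF SGE may
 * not cross a 4K boundary, so only the bytes up to that boundary are
 * consumed now and the rest of the segment is carried into the next
 * pass through protgroup_offset.
 */
static inline unsigned int example_dif_4k_remainder(unsigned int addr_lo)
{
	return 0x1000 - (addr_lo & 0xfff);	/* bytes left in this 4K page */
}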
2439 * lpfc_prot_group_type - Get protection group type of a SCSI command
2440 * @phba: The Hba for which this call is being executed.
2441 * @sc: pointer to scsi command we're working on
2443 * Given a SCSI command that supports DIF, determine the composition of the
2444 * protection groups involved in setting up the buffer lists
2446 * Returns: Protection group type (with or without DIF)
2449 static int
2450 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2452 int ret = LPFC_PG_TYPE_INVALID;
2453 unsigned char op = scsi_get_prot_op(sc);
2455 switch (op) {
2456 case SCSI_PROT_READ_STRIP:
2457 case SCSI_PROT_WRITE_INSERT:
2458 ret = LPFC_PG_TYPE_NO_DIF;
2459 break;
2460 case SCSI_PROT_READ_INSERT:
2461 case SCSI_PROT_WRITE_STRIP:
2462 case SCSI_PROT_READ_PASS:
2463 case SCSI_PROT_WRITE_PASS:
2464 ret = LPFC_PG_TYPE_DIF_BUF;
2465 break;
2466 default:
2467 if (phba)
2468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2469 "9021 Unsupported protection op:%d\n",
2470 op);
2471 break;
2473 return ret;
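/*
 * Reading the switch above another way (illustrative): the grouping
 * tracks whether the host-side scatterlist carries protection buffers.
 * For READ_STRIP and WRITE_INSERT the DIF exists only on the wire, so
 * no protection buffers accompany the command (LPFC_PG_TYPE_NO_DIF);
 * for the remaining ops the host supplies or receives DIF in separate
 * buffers (LPFC_PG_TYPE_DIF_BUF).
 */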
2477 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2478 * @phba: The Hba for which this call is being executed.
2479 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2481 * Adjust the data length to account for how much data
2482 * is actually on the wire.
2484 * returns the adjusted data length
2486 static int
2487 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2488 struct lpfc_io_buf *lpfc_cmd)
2490 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2491 int fcpdl;
2493 fcpdl = scsi_bufflen(sc);
2495 /* Check if there is protection data on the wire */
2496 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2497 /* Read check for protection data */
2498 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2499 return fcpdl;
2501 } else {
2502 /* Write check for protection data */
2503 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2504 return fcpdl;
2508 * If we are in DIF Type 1 mode, every data block has an 8-byte
2509 * DIF (trailer) attached to it. We must adjust the FCP data length
2510 * to account for the protection data.
2512 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2514 return fcpdl;
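/*
 * Worked example (illustrative): a 4096-byte transfer with 512-byte
 * logical blocks carries 4096 / 512 = 8 DIF tuples of 8 bytes each,
 * so the adjusted wire length is 4096 + 8 * 8 = 4160 bytes.
 */
static inline int example_adjust_dl(int fcpdl, unsigned int blksize)
{
	return fcpdl + (fcpdl / blksize) * 8;	/* 4096 -> 4160 for 512B blocks */
}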
2518 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2519 * @phba: The Hba for which this call is being executed.
2520 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2522 * This is the protection/DIF aware version of
2523 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2524 * two functions eventually, but for now, it's here.
2525 * RETURNS 0 - SUCCESS,
2526 * 1 - Failed DMA map, retry.
2527 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2529 static int
2530 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2531 struct lpfc_io_buf *lpfc_cmd)
2533 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2534 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2535 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2536 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2537 uint32_t num_bde = 0;
2538 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2539 int prot_group_type = 0;
2540 int fcpdl;
2541 int ret = 1;
2542 struct lpfc_vport *vport = phba->pport;
2545 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2546 * fcp_rsp regions to the first data bde entry
2548 bpl += 2;
2549 if (scsi_sg_count(scsi_cmnd)) {
2551 * The driver stores the segment count returned from pci_map_sg
2552 * because this is a count of dma-mappings used to map the use_sg
2553 * pages. They are not guaranteed to be the same for those
2554 * architectures that implement an IOMMU.
2556 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2557 scsi_sglist(scsi_cmnd),
2558 scsi_sg_count(scsi_cmnd), datadir);
2559 if (unlikely(!datasegcnt))
2560 return 1;
2562 lpfc_cmd->seg_cnt = datasegcnt;
2564 /* First check if data segment count from SCSI Layer is good */
2565 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2566 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2567 ret = 2;
2568 goto err;
2571 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2573 switch (prot_group_type) {
2574 case LPFC_PG_TYPE_NO_DIF:
2576 /* Here we need to add a PDE5 and PDE6 to the count */
2577 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2578 ret = 2;
2579 goto err;
2582 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2583 datasegcnt);
2584 /* we should have 2 or more entries in buffer list */
2585 if (num_bde < 2) {
2586 ret = 2;
2587 goto err;
2589 break;
2591 case LPFC_PG_TYPE_DIF_BUF:
2593 * This type indicates that protection buffers are
2594 * passed to the driver, so they need to be prepared
2595 * for DMA
2597 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2598 scsi_prot_sglist(scsi_cmnd),
2599 scsi_prot_sg_count(scsi_cmnd), datadir);
2600 if (unlikely(!protsegcnt)) {
2601 scsi_dma_unmap(scsi_cmnd);
2602 return 1;
2605 lpfc_cmd->prot_seg_cnt = protsegcnt;
2608 * There is a minimum of 4 BPLs used for every
2609 * protection data segment.
2611 if ((lpfc_cmd->prot_seg_cnt * 4) >
2612 (phba->cfg_total_seg_cnt - 2)) {
2613 ret = 2;
2614 goto err;
2617 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2618 datasegcnt, protsegcnt);
2619 /* we should have 3 or more entries in buffer list */
2620 if ((num_bde < 3) ||
2621 (num_bde > phba->cfg_total_seg_cnt)) {
2622 ret = 2;
2623 goto err;
2625 break;
2627 case LPFC_PG_TYPE_INVALID:
2628 default:
2629 scsi_dma_unmap(scsi_cmnd);
2630 lpfc_cmd->seg_cnt = 0;
2632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2633 "9022 Unexpected protection group %i\n",
2634 prot_group_type);
2635 return 2;
2640 * Finish initializing those IOCB fields that are dependent on the
2641 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2642 * reinitialized since all iocb memory resources are used many times
2643 * for transmit, receive, and continuation bpl's.
2645 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2646 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2647 iocb_cmd->ulpBdeCount = 1;
2648 iocb_cmd->ulpLe = 1;
2650 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2651 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2654 * Due to the difference in data length between DIF/non-DIF paths,
2655 * we need to set word 4 of IOCB here
2657 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2660 * For First burst, we may need to adjust the initial transfer
2661 * length for DIF
2663 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2664 (fcpdl < vport->cfg_first_burst_size))
2665 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2667 return 0;
2668 err:
2669 if (lpfc_cmd->seg_cnt)
2670 scsi_dma_unmap(scsi_cmnd);
2671 if (lpfc_cmd->prot_seg_cnt)
2672 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2673 scsi_prot_sg_count(scsi_cmnd),
2674 scsi_cmnd->sc_data_direction);
2676 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2677 "9023 Cannot setup S/G List for HBA"
2678 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2679 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2680 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2681 prot_group_type, num_bde);
2683 lpfc_cmd->seg_cnt = 0;
2684 lpfc_cmd->prot_seg_cnt = 0;
2685 return ret;
2689 * This function calculates the T10 DIF guard tag
2690 * on the specified data using the CRC algorithm
2691 * implemented by crc_t10dif.
2693 static uint16_t
2694 lpfc_bg_crc(uint8_t *data, int count)
2696 uint16_t crc = 0;
2697 uint16_t x;
2699 crc = crc_t10dif(data, count);
2700 x = cpu_to_be16(crc);
2701 return x;
2705 * This function calculates the T10 DIF guard tag
2706 * on the specified data using the checksum algorithm
2707 * implemented by ip_compute_csum.
2709 static uint16_t
2710 lpfc_bg_csum(uint8_t *data, int count)
2712 uint16_t ret;
2714 ret = ip_compute_csum(data, count);
2715 return ret;
2719 * This function examines the protection data to try to determine
2720 * what type of T10-DIF error occurred.
2722 static void
2723 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2725 struct scatterlist *sgpe; /* s/g prot entry */
2726 struct scatterlist *sgde; /* s/g data entry */
2727 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2728 struct scsi_dif_tuple *src = NULL;
2729 uint8_t *data_src = NULL;
2730 uint16_t guard_tag;
2731 uint16_t start_app_tag, app_tag;
2732 uint32_t start_ref_tag, ref_tag;
2733 int prot, protsegcnt;
2734 int err_type, len, data_len;
2735 int chk_ref, chk_app, chk_guard;
2736 uint16_t sum;
2737 unsigned blksize;
2739 err_type = BGS_GUARD_ERR_MASK;
2740 sum = 0;
2741 guard_tag = 0;
2743 /* First check to see if there is protection data to examine */
2744 prot = scsi_get_prot_op(cmd);
2745 if ((prot == SCSI_PROT_READ_STRIP) ||
2746 (prot == SCSI_PROT_WRITE_INSERT) ||
2747 (prot == SCSI_PROT_NORMAL))
2748 goto out;
2750 /* Currently the driver just supports ref_tag and guard_tag checking */
2751 chk_ref = 1;
2752 chk_app = 0;
2753 chk_guard = 0;
2755 /* Setup a ptr to the protection data provided by the SCSI host */
2756 sgpe = scsi_prot_sglist(cmd);
2757 protsegcnt = lpfc_cmd->prot_seg_cnt;
2759 if (sgpe && protsegcnt) {
2762 * We will only try to verify guard tag if the segment
2763 * data length is a multiple of the blksize.
2765 sgde = scsi_sglist(cmd);
2766 blksize = lpfc_cmd_blksize(cmd);
2767 data_src = (uint8_t *)sg_virt(sgde);
2768 data_len = sgde->length;
2769 if ((data_len & (blksize - 1)) == 0)
2770 chk_guard = 1;
2772 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2773 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2774 start_app_tag = src->app_tag;
2775 len = sgpe->length;
2776 while (src && protsegcnt) {
2777 while (len) {
2780 * First check to see if a protection data
2781 * check is valid
2783 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2784 (src->app_tag == T10_PI_APP_ESCAPE)) {
2785 start_ref_tag++;
2786 goto skipit;
2789 /* First Guard Tag checking */
2790 if (chk_guard) {
2791 guard_tag = src->guard_tag;
2792 if (lpfc_cmd_guard_csum(cmd))
2793 sum = lpfc_bg_csum(data_src,
2794 blksize);
2795 else
2796 sum = lpfc_bg_crc(data_src,
2797 blksize);
2798 if ((guard_tag != sum)) {
2799 err_type = BGS_GUARD_ERR_MASK;
2800 goto out;
2804 /* Reference Tag checking */
2805 ref_tag = be32_to_cpu(src->ref_tag);
2806 if (chk_ref && (ref_tag != start_ref_tag)) {
2807 err_type = BGS_REFTAG_ERR_MASK;
2808 goto out;
2810 start_ref_tag++;
2812 /* App Tag checking */
2813 app_tag = src->app_tag;
2814 if (chk_app && (app_tag != start_app_tag)) {
2815 err_type = BGS_APPTAG_ERR_MASK;
2816 goto out;
2818 skipit:
2819 len -= sizeof(struct scsi_dif_tuple);
2820 if (len < 0)
2821 len = 0;
2822 src++;
2824 data_src += blksize;
2825 data_len -= blksize;
2828 * Are we at the end of the Data segment?
2829 * The data segment is only used for Guard
2830 * tag checking.
2832 if (chk_guard && (data_len == 0)) {
2833 chk_guard = 0;
2834 sgde = sg_next(sgde);
2835 if (!sgde)
2836 goto out;
2838 data_src = (uint8_t *)sg_virt(sgde);
2839 data_len = sgde->length;
2840 if ((data_len & (blksize - 1)) == 0)
2841 chk_guard = 1;
2845 /* Goto the next Protection data segment */
2846 sgpe = sg_next(sgpe);
2847 if (sgpe) {
2848 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2849 len = sgpe->length;
2850 } else {
2851 src = NULL;
2853 protsegcnt--;
2856 out:
2857 if (err_type == BGS_GUARD_ERR_MASK) {
2858 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2859 0x10, 0x1);
2860 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2861 SAM_STAT_CHECK_CONDITION;
2862 phba->bg_guard_err_cnt++;
2863 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2864 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2865 (unsigned long)scsi_get_lba(cmd),
2866 sum, guard_tag);
2868 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2869 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2870 0x10, 0x3);
2871 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2872 SAM_STAT_CHECK_CONDITION;
2874 phba->bg_reftag_err_cnt++;
2875 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2876 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2877 (unsigned long)scsi_get_lba(cmd),
2878 ref_tag, start_ref_tag);
2880 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2881 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2882 0x10, 0x2);
2883 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2884 SAM_STAT_CHECK_CONDITION;
2886 phba->bg_apptag_err_cnt++;
2887 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2888 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2889 (unsigned long)scsi_get_lba(cmd),
2890 app_tag, start_app_tag);
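/*
 * Note (illustrative): T10_PI_REF_ESCAPE (0xffffffff) and
 * T10_PI_APP_ESCAPE (0xffff) are the T10 escape values; a tuple
 * carrying either one is exempt from verification, which is why the
 * walk above jumps to skipit when it sees them.
 */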
2895 * This function checks for BlockGuard errors detected by
2896 * the HBA. In case of errors, the ASC/ASCQ fields in the
2897 * sense buffer will be set accordingly, paired with
2898 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2899 * detected corruption.
2901 * Returns:
2902 * 0 - No error found
2903 * 1 - BlockGuard error found
2904 * -1 - Internal error (bad profile, etc.)
2906 static int
2907 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2908 struct lpfc_wcqe_complete *wcqe)
2910 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2911 int ret = 0;
2912 u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
2913 u32 bghm = 0;
2914 u32 bgstat = 0;
2915 u64 failing_sector = 0;
2917 if (status == CQE_STATUS_DI_ERROR) {
2918 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
2919 bgstat |= BGS_GUARD_ERR_MASK;
2920 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
2921 bgstat |= BGS_APPTAG_ERR_MASK;
2922 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
2923 bgstat |= BGS_REFTAG_ERR_MASK;
2925 /* Check to see if there was any good data before the error */
2926 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2927 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2928 bghm = wcqe->total_data_placed;
2932 * Set ALL the error bits to indicate we don't know what
2933 * type of error it is.
2935 if (!bgstat)
2936 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
2937 BGS_GUARD_ERR_MASK);
2940 if (lpfc_bgs_get_guard_err(bgstat)) {
2941 ret = 1;
2943 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2944 0x10, 0x1);
2945 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2946 SAM_STAT_CHECK_CONDITION;
2947 phba->bg_guard_err_cnt++;
2948 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2949 "9059 BLKGRD: Guard Tag error in cmd"
2950 " 0x%x lba 0x%llx blk cnt 0x%x "
2951 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2952 (unsigned long long)scsi_get_lba(cmd),
2953 blk_rq_sectors(cmd->request), bgstat, bghm);
2956 if (lpfc_bgs_get_reftag_err(bgstat)) {
2957 ret = 1;
2959 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2960 0x10, 0x3);
2961 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2962 SAM_STAT_CHECK_CONDITION;
2964 phba->bg_reftag_err_cnt++;
2965 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2966 "9060 BLKGRD: Ref Tag error in cmd"
2967 " 0x%x lba 0x%llx blk cnt 0x%x "
2968 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2969 (unsigned long long)scsi_get_lba(cmd),
2970 blk_rq_sectors(cmd->request), bgstat, bghm);
2973 if (lpfc_bgs_get_apptag_err(bgstat)) {
2974 ret = 1;
2976 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2977 0x10, 0x2);
2978 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2979 SAM_STAT_CHECK_CONDITION;
2981 phba->bg_apptag_err_cnt++;
2982 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2983 "9062 BLKGRD: App Tag error in cmd"
2984 " 0x%x lba 0x%llx blk cnt 0x%x "
2985 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2986 (unsigned long long)scsi_get_lba(cmd),
2987 blk_rq_sectors(cmd->request), bgstat, bghm);
2990 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2992 * setup sense data descriptor 0 per SPC-4 as an information
2993 * field, and put the failing LBA in it.
2994 * This code assumes there was also a guard/app/ref tag error
2995 * indication.
2997 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2998 cmd->sense_buffer[8] = 0; /* Information descriptor type */
2999 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3000 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3002 /* bghm is an "on the wire" FC frame-based count */
3003 switch (scsi_get_prot_op(cmd)) {
3004 case SCSI_PROT_READ_INSERT:
3005 case SCSI_PROT_WRITE_STRIP:
3006 bghm /= cmd->device->sector_size;
3007 break;
3008 case SCSI_PROT_READ_STRIP:
3009 case SCSI_PROT_WRITE_INSERT:
3010 case SCSI_PROT_READ_PASS:
3011 case SCSI_PROT_WRITE_PASS:
3012 bghm /= (cmd->device->sector_size +
3013 sizeof(struct scsi_dif_tuple));
3014 break;
3017 failing_sector = scsi_get_lba(cmd);
3018 failing_sector += bghm;
3020 /* Descriptor Information */
3021 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3024 if (!ret) {
3025 /* No error was reported - problem in FW? */
3026 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3027 "9068 BLKGRD: Unknown error in cmd"
3028 " 0x%x lba 0x%llx blk cnt 0x%x "
3029 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3030 (unsigned long long)scsi_get_lba(cmd),
3031 blk_rq_sectors(cmd->request), bgstat, bghm);
3033 /* Calculate what type of error it was */
3034 lpfc_calc_bg_err(phba, lpfc_cmd);
3036 return ret;
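/*
 * Worked example (illustrative): with 512-byte sectors and a PASS or
 * wire-DIF op, each block occupies 512 + 8 bytes in flight, so a bghm
 * of 5200 bytes placed before the error means 5200 / 520 = 10 good
 * blocks, and the failing sector is scsi_get_lba(cmd) + 10.
 */
static inline u64
example_failing_sector(u64 lba, u32 bghm, u32 sector_size, bool dif_on_wire)
{
	u32 stride = sector_size + (dif_on_wire ? 8 : 0);	/* 8 == DIF tuple */

	return lba + bghm / stride;
}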
3040 * This function checks for BlockGuard errors detected by
3041 * the HBA. In case of errors, the ASC/ASCQ fields in the
3042 * sense buffer will be set accordingly, paired with
3043 * ILLEGAL_REQUEST to signal to the kernel that the HBA
3044 * detected corruption.
3046 * Returns:
3047 * 0 - No error found
3048 * 1 - BlockGuard error found
3049 * -1 - Internal error (bad profile, etc.)
3051 static int
3052 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
3053 struct lpfc_iocbq *pIocbOut)
3055 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3056 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3057 int ret = 0;
3058 uint32_t bghm = bgf->bghm;
3059 uint32_t bgstat = bgf->bgstat;
3060 uint64_t failing_sector = 0;
3062 if (lpfc_bgs_get_invalid_prof(bgstat)) {
3063 cmd->result = DID_ERROR << 16;
3064 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3065 "9072 BLKGRD: Invalid BG Profile in cmd"
3066 " 0x%x lba 0x%llx blk cnt 0x%x "
3067 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3068 (unsigned long long)scsi_get_lba(cmd),
3069 blk_rq_sectors(cmd->request), bgstat, bghm);
3070 ret = (-1);
3071 goto out;
3074 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3075 cmd->result = DID_ERROR << 16;
3076 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3077 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
3078 " 0x%x lba 0x%llx blk cnt 0x%x "
3079 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3080 (unsigned long long)scsi_get_lba(cmd),
3081 blk_rq_sectors(cmd->request), bgstat, bghm);
3082 ret = (-1);
3083 goto out;
3086 if (lpfc_bgs_get_guard_err(bgstat)) {
3087 ret = 1;
3089 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3090 0x10, 0x1);
3091 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3092 SAM_STAT_CHECK_CONDITION;
3093 phba->bg_guard_err_cnt++;
3094 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3095 "9055 BLKGRD: Guard Tag error in cmd"
3096 " 0x%x lba 0x%llx blk cnt 0x%x "
3097 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3098 (unsigned long long)scsi_get_lba(cmd),
3099 blk_rq_sectors(cmd->request), bgstat, bghm);
3102 if (lpfc_bgs_get_reftag_err(bgstat)) {
3103 ret = 1;
3105 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3106 0x10, 0x3);
3107 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3108 SAM_STAT_CHECK_CONDITION;
3110 phba->bg_reftag_err_cnt++;
3111 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3112 "9056 BLKGRD: Ref Tag error in cmd"
3113 " 0x%x lba 0x%llx blk cnt 0x%x "
3114 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3115 (unsigned long long)scsi_get_lba(cmd),
3116 blk_rq_sectors(cmd->request), bgstat, bghm);
3119 if (lpfc_bgs_get_apptag_err(bgstat)) {
3120 ret = 1;
3122 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3123 0x10, 0x2);
3124 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3125 SAM_STAT_CHECK_CONDITION;
3127 phba->bg_apptag_err_cnt++;
3128 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3129 "9061 BLKGRD: App Tag error in cmd"
3130 " 0x%x lba 0x%llx blk cnt 0x%x "
3131 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3132 (unsigned long long)scsi_get_lba(cmd),
3133 blk_rq_sectors(cmd->request), bgstat, bghm);
3136 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3138 * setup sense data descriptor 0 per SPC-4 as an information
3139 * field, and put the failing LBA in it.
3140 * This code assumes there was also a guard/app/ref tag error
3141 * indication.
3143 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3144 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3145 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3146 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3148 /* bghm is an "on the wire" FC frame-based count */
3149 switch (scsi_get_prot_op(cmd)) {
3150 case SCSI_PROT_READ_INSERT:
3151 case SCSI_PROT_WRITE_STRIP:
3152 bghm /= cmd->device->sector_size;
3153 break;
3154 case SCSI_PROT_READ_STRIP:
3155 case SCSI_PROT_WRITE_INSERT:
3156 case SCSI_PROT_READ_PASS:
3157 case SCSI_PROT_WRITE_PASS:
3158 bghm /= (cmd->device->sector_size +
3159 sizeof(struct scsi_dif_tuple));
3160 break;
3163 failing_sector = scsi_get_lba(cmd);
3164 failing_sector += bghm;
3166 /* Descriptor Information */
3167 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3170 if (!ret) {
3171 /* No error was reported - problem in FW? */
3172 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3173 "9057 BLKGRD: Unknown error in cmd"
3174 " 0x%x lba 0x%llx blk cnt 0x%x "
3175 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3176 (unsigned long long)scsi_get_lba(cmd),
3177 blk_rq_sectors(cmd->request), bgstat, bghm);
3179 /* Calculate what type of error it was */
3180 lpfc_calc_bg_err(phba, lpfc_cmd);
3182 out:
3183 return ret;
3187 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3188 * @phba: The Hba for which this call is being executed.
3189 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3191 * This routine does the PCI DMA mapping for the scatter-gather list of
3192 * the scsi cmnd field of @lpfc_cmd, for devices with the SLI-4 interface spec.
3194 * Return codes:
3195 * 2 - Error - Do not retry
3196 * 1 - Error - Retry
3197 * 0 - Success
3199 static int
3200 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3202 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3203 struct scatterlist *sgel = NULL;
3204 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3205 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3206 struct sli4_sge *first_data_sgl;
3207 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3208 struct lpfc_vport *vport = phba->pport;
3209 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3210 dma_addr_t physaddr;
3211 uint32_t num_bde = 0;
3212 uint32_t dma_len;
3213 uint32_t dma_offset = 0;
3214 int nseg, i, j;
3215 struct ulp_bde64 *bde;
3216 bool lsp_just_set = false;
3217 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3220 * There are three possibilities here - use scatter-gather segment, use
3221 * the single mapping, or neither. Start the lpfc command prep by
3222 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3223 * data bde entry.
3225 if (scsi_sg_count(scsi_cmnd)) {
3227 * The driver stores the segment count returned from pci_map_sg
3228 * because this is a count of dma-mappings used to map the use_sg
3229 * pages. They are not guaranteed to be the same for those
3230 * architectures that implement an IOMMU.
3233 nseg = scsi_dma_map(scsi_cmnd);
3234 if (unlikely(nseg <= 0))
3235 return 1;
3236 sgl += 1;
3237 /* clear the last flag in the fcp_rsp map entry */
3238 sgl->word2 = le32_to_cpu(sgl->word2);
3239 bf_set(lpfc_sli4_sge_last, sgl, 0);
3240 sgl->word2 = cpu_to_le32(sgl->word2);
3241 sgl += 1;
3242 first_data_sgl = sgl;
3243 lpfc_cmd->seg_cnt = nseg;
3244 if (!phba->cfg_xpsgl &&
3245 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3247 "9074 BLKGRD:"
3248 " %s: Too many sg segments from "
3249 "dma_map_sg. Config %d, seg_cnt %d\n",
3250 __func__, phba->cfg_sg_seg_cnt,
3251 lpfc_cmd->seg_cnt);
3252 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3253 lpfc_cmd->seg_cnt = 0;
3254 scsi_dma_unmap(scsi_cmnd);
3255 return 2;
3259 * The driver established a maximum scatter-gather segment count
3260 * during probe that limits the number of sg elements in any
3261 * single scsi command. Just run through the seg_cnt and format
3262 * the sge's.
3263 * When using SLI-3 the driver will try to fit all the BDEs into
3264 * the IOCB. If it can't then the BDEs get added to a BPL as it
3265 * does for SLI-2 mode.
3268 /* for tracking segment boundaries */
3269 sgel = scsi_sglist(scsi_cmnd);
3270 j = 2;
3271 for (i = 0; i < nseg; i++) {
3272 sgl->word2 = 0;
3273 if ((num_bde + 1) == nseg) {
3274 bf_set(lpfc_sli4_sge_last, sgl, 1);
3275 bf_set(lpfc_sli4_sge_type, sgl,
3276 LPFC_SGE_TYPE_DATA);
3277 } else {
3278 bf_set(lpfc_sli4_sge_last, sgl, 0);
3280 /* do we need to expand the segment */
3281 if (!lsp_just_set &&
3282 !((j + 1) % phba->border_sge_num) &&
3283 ((nseg - 1) != i)) {
3284 /* set LSP type */
3285 bf_set(lpfc_sli4_sge_type, sgl,
3286 LPFC_SGE_TYPE_LSP);
3288 sgl_xtra = lpfc_get_sgl_per_hdwq(
3289 phba, lpfc_cmd);
3291 if (unlikely(!sgl_xtra)) {
3292 lpfc_cmd->seg_cnt = 0;
3293 scsi_dma_unmap(scsi_cmnd);
3294 return 1;
3296 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3297 sgl_xtra->dma_phys_sgl));
3298 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3299 sgl_xtra->dma_phys_sgl));
3301 } else {
3302 bf_set(lpfc_sli4_sge_type, sgl,
3303 LPFC_SGE_TYPE_DATA);
3307 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3308 LPFC_SGE_TYPE_LSP)) {
3309 if ((nseg - 1) == i)
3310 bf_set(lpfc_sli4_sge_last, sgl, 1);
3312 physaddr = sg_dma_address(sgel);
3313 dma_len = sg_dma_len(sgel);
3314 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3315 physaddr));
3316 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3317 physaddr));
3319 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3320 sgl->word2 = cpu_to_le32(sgl->word2);
3321 sgl->sge_len = cpu_to_le32(dma_len);
3323 dma_offset += dma_len;
3324 sgel = sg_next(sgel);
3326 sgl++;
3327 lsp_just_set = false;
3329 } else {
3330 sgl->word2 = cpu_to_le32(sgl->word2);
3331 sgl->sge_len = cpu_to_le32(
3332 phba->cfg_sg_dma_buf_size);
3334 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3335 i = i - 1;
3337 lsp_just_set = true;
3340 j++;
3343 * Setup the first Payload BDE. For FCoE we just key off
3344 * Performance Hints, for FC we use lpfc_enable_pbde.
3345 * We populate words 13-15 of IOCB/WQE.
3347 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3348 phba->cfg_enable_pbde) {
3349 bde = (struct ulp_bde64 *)
3350 &wqe->words[13];
3351 bde->addrLow = first_data_sgl->addr_lo;
3352 bde->addrHigh = first_data_sgl->addr_hi;
3353 bde->tus.f.bdeSize =
3354 le32_to_cpu(first_data_sgl->sge_len);
3355 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3356 bde->tus.w = cpu_to_le32(bde->tus.w);
3358 } else {
3359 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3361 } else {
3362 sgl += 1;
3363 /* clear the last flag in the fcp_rsp map entry */
3364 sgl->word2 = le32_to_cpu(sgl->word2);
3365 bf_set(lpfc_sli4_sge_last, sgl, 1);
3366 sgl->word2 = cpu_to_le32(sgl->word2);
3368 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3369 phba->cfg_enable_pbde) {
3370 bde = (struct ulp_bde64 *)
3371 &wqe->words[13];
3372 memset(bde, 0, (sizeof(uint32_t) * 3));
3376 /* Word 11 */
3377 if (phba->cfg_enable_pbde)
3378 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3381 * Finish initializing those IOCB fields that are dependent on the
3382 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3383 * explicitly reinitialized;
3384 * all iocb memory resources are reused.
3386 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3387 /* Set first-burst provided it was successfully negotiated */
3388 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3389 vport->cfg_first_burst_size &&
3390 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3391 u32 init_len, total_len;
3393 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3394 init_len = min(total_len, vport->cfg_first_burst_size);
3396 /* Word 4 & 5 */
3397 wqe->fcp_iwrite.initial_xfer_len = init_len;
3398 wqe->fcp_iwrite.total_xfer_len = total_len;
3399 } else {
3400 /* Word 4 */
3401 wqe->fcp_iwrite.total_xfer_len =
3402 be32_to_cpu(fcp_cmnd->fcpDl);
3406 * If the OAS driver feature is enabled and the lun is enabled for
3407 * OAS, set the oas iocb related flags.
3409 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3410 scsi_cmnd->device->hostdata)->oas_enabled) {
3411 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3412 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3413 scsi_cmnd->device->hostdata)->priority;
3415 /* Word 10 */
3416 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3417 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3419 if (lpfc_cmd->cur_iocbq.priority)
3420 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3421 (lpfc_cmd->cur_iocbq.priority << 1));
3422 else
3423 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3424 (phba->cfg_XLanePriority << 1));
3427 return 0;
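/*
 * Illustrative sketch: when first burst has been negotiated on a
 * non-FCoE link, the initial transfer length written to the WQE is
 * simply the total length clamped to the negotiated burst size.
 */
static inline u32 example_first_burst_len(u32 total_len, u32 first_burst_size)
{
	return min(total_len, first_burst_size);	/* WQE words 4 & 5 */
}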
3431 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3432 * @phba: The Hba for which this call is being executed.
3433 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3435 * This is the protection/DIF aware version of
3436 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3437 * two functions eventually, but for now, it's here
3438 * Return codes:
3439 * 2 - Error - Do not retry
3440 * 1 - Error - Retry
3441 * 0 - Success
3443 static int
3444 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3445 struct lpfc_io_buf *lpfc_cmd)
3447 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3448 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3449 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3450 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3451 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3452 uint32_t num_sge = 0;
3453 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3454 int prot_group_type = 0;
3455 int fcpdl;
3456 int ret = 1;
3457 struct lpfc_vport *vport = phba->pport;
3460 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3461 * fcp_rsp regions to the first data sge entry
3463 if (scsi_sg_count(scsi_cmnd)) {
3465 * The driver stores the segment count returned from pci_map_sg
3466 * because this is a count of dma-mappings used to map the use_sg
3467 * pages. They are not guaranteed to be the same for those
3468 * architectures that implement an IOMMU.
3470 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3471 scsi_sglist(scsi_cmnd),
3472 scsi_sg_count(scsi_cmnd), datadir);
3473 if (unlikely(!datasegcnt))
3474 return 1;
3476 sgl += 1;
3477 /* clear the last flag in the fcp_rsp map entry */
3478 sgl->word2 = le32_to_cpu(sgl->word2);
3479 bf_set(lpfc_sli4_sge_last, sgl, 0);
3480 sgl->word2 = cpu_to_le32(sgl->word2);
3482 sgl += 1;
3483 lpfc_cmd->seg_cnt = datasegcnt;
3485 /* First check if data segment count from SCSI Layer is good */
3486 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3487 !phba->cfg_xpsgl) {
3488 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3489 ret = 2;
3490 goto err;
3493 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3495 switch (prot_group_type) {
3496 case LPFC_PG_TYPE_NO_DIF:
3497 /* Here we need to add a DISEED to the count */
3498 if (((lpfc_cmd->seg_cnt + 1) >
3499 phba->cfg_total_seg_cnt) &&
3500 !phba->cfg_xpsgl) {
3501 ret = 2;
3502 goto err;
3505 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3506 datasegcnt, lpfc_cmd);
3508 /* we should have 2 or more entries in buffer list */
3509 if (num_sge < 2) {
3510 ret = 2;
3511 goto err;
3513 break;
3515 case LPFC_PG_TYPE_DIF_BUF:
3517 * This type indicates that protection buffers are
3518 * passed to the driver, so they need to be prepared
3519 * for DMA
3521 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3522 scsi_prot_sglist(scsi_cmnd),
3523 scsi_prot_sg_count(scsi_cmnd), datadir);
3524 if (unlikely(!protsegcnt)) {
3525 scsi_dma_unmap(scsi_cmnd);
3526 return 1;
3529 lpfc_cmd->prot_seg_cnt = protsegcnt;
3531 * There is a minimum of 3 SGEs used for every
3532 * protection data segment.
3534 if (((lpfc_cmd->prot_seg_cnt * 3) >
3535 (phba->cfg_total_seg_cnt - 2)) &&
3536 !phba->cfg_xpsgl) {
3537 ret = 2;
3538 goto err;
3541 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3542 datasegcnt, protsegcnt, lpfc_cmd);
3544 /* we should have 3 or more entries in buffer list */
3545 if (num_sge < 3 ||
3546 (num_sge > phba->cfg_total_seg_cnt &&
3547 !phba->cfg_xpsgl)) {
3548 ret = 2;
3549 goto err;
3551 break;
3553 case LPFC_PG_TYPE_INVALID:
3554 default:
3555 scsi_dma_unmap(scsi_cmnd);
3556 lpfc_cmd->seg_cnt = 0;
3558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3559 "9083 Unexpected protection group %i\n",
3560 prot_group_type);
3561 return 2;
3565 switch (scsi_get_prot_op(scsi_cmnd)) {
3566 case SCSI_PROT_WRITE_STRIP:
3567 case SCSI_PROT_READ_STRIP:
3568 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3569 break;
3570 case SCSI_PROT_WRITE_INSERT:
3571 case SCSI_PROT_READ_INSERT:
3572 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3573 break;
3574 case SCSI_PROT_WRITE_PASS:
3575 case SCSI_PROT_READ_PASS:
3576 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3577 break;
3580 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3581 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3583 /* Set first-burst provided it was successfully negotiated */
3584 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3585 vport->cfg_first_burst_size &&
3586 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3587 u32 init_len, total_len;
3589 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3590 init_len = min(total_len, vport->cfg_first_burst_size);
3592 /* Word 4 & 5 */
3593 wqe->fcp_iwrite.initial_xfer_len = init_len;
3594 wqe->fcp_iwrite.total_xfer_len = total_len;
3595 } else {
3596 /* Word 4 */
3597 wqe->fcp_iwrite.total_xfer_len =
3598 be32_to_cpu(fcp_cmnd->fcpDl);
3602 * If the OAS driver feature is enabled and the lun is enabled for
3603 * OAS, set the oas iocb related flags.
3605 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3606 scsi_cmnd->device->hostdata)->oas_enabled) {
3607 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3609 /* Word 10 */
3610 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3611 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3612 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3613 (phba->cfg_XLanePriority << 1));
3616 /* Word 7. DIF Flags */
3617 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
3618 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3619 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
3620 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3621 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
3622 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3624 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
3625 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3627 return 0;
3628 err:
3629 if (lpfc_cmd->seg_cnt)
3630 scsi_dma_unmap(scsi_cmnd);
3631 if (lpfc_cmd->prot_seg_cnt)
3632 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3633 scsi_prot_sg_count(scsi_cmnd),
3634 scsi_cmnd->sc_data_direction);
3636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3637 "9084 Cannot setup S/G List for HBA"
3638 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3639 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3640 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3641 prot_group_type, num_sge);
3643 lpfc_cmd->seg_cnt = 0;
3644 lpfc_cmd->prot_seg_cnt = 0;
3645 return ret;
3649 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3650 * @phba: The Hba for which this call is being executed.
3651 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3653 * This routine wraps the actual DMA mapping function pointer from the
3654 * lpfc_hba struct.
3656 * Return codes:
3657 * 1 - Error
3658 * 0 - Success
3660 static inline int
3661 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3663 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
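/*
 * Usage note (assumption): the lpfc_scsi_prep_dma_buf pointer is bound
 * elsewhere in the driver to the SLI-3 or SLI-4 variant according to
 * the HBA's SLI revision, e.g.:
 *
 *	phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 */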
3667 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3668 * using BlockGuard.
3669 * @phba: The Hba for which this call is being executed.
3670 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3672 * This routine wraps the actual DMA mapping function pointer from the
3673 * lpfc_hba struct.
3675 * Return codes:
3676 * 1 - Error
3677 * 0 - Success
3679 static inline int
3680 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3682 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3686 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3687 * buffer
3688 * @vport: The virtual port for which this call is being executed.
3689 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3690 * @tmo: Timeout value for IO
3692 * This routine initializes the IOCB/WQE data structure from the scsi command
3694 * Return codes:
3695 * 1 - Error
3696 * 0 - Success
3698 static inline int
3699 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3700 uint8_t tmo)
3702 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3706 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3707 * @phba: Pointer to hba context object.
3708 * @vport: Pointer to vport object.
3709 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3710 * @fcpi_parm: FCP response transfer count reported with the error.
3712 * This function posts an event when there is a SCSI command reporting
3713 * an error from the scsi device.
3715 static void
3716 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3717 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3718 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3719 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3720 uint32_t resp_info = fcprsp->rspStatus2;
3721 uint32_t scsi_status = fcprsp->rspStatus3;
3722 struct lpfc_fast_path_event *fast_path_evt = NULL;
3723 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3724 unsigned long flags;
3726 if (!pnode)
3727 return;
3729 /* If there is a queue full or busy condition send a scsi event */
3730 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3731 (cmnd->result == SAM_STAT_BUSY)) {
3732 fast_path_evt = lpfc_alloc_fast_evt(phba);
3733 if (!fast_path_evt)
3734 return;
3735 fast_path_evt->un.scsi_evt.event_type =
3736 FC_REG_SCSI_EVENT;
3737 fast_path_evt->un.scsi_evt.subcategory =
3738 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3739 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3740 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3741 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3742 &pnode->nlp_portname, sizeof(struct lpfc_name));
3743 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3744 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3745 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3746 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3747 fast_path_evt = lpfc_alloc_fast_evt(phba);
3748 if (!fast_path_evt)
3749 return;
3750 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3751 FC_REG_SCSI_EVENT;
3752 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3753 LPFC_EVENT_CHECK_COND;
3754 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3755 cmnd->device->lun;
3756 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3757 &pnode->nlp_portname, sizeof(struct lpfc_name));
3758 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3759 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3760 fast_path_evt->un.check_cond_evt.sense_key =
3761 cmnd->sense_buffer[2] & 0xf;
3762 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3763 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3764 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3765 fcpi_parm &&
3766 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3767 ((scsi_status == SAM_STAT_GOOD) &&
3768 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3770 * If the status is good or the residual does not match fcpi_parm,
3771 * and fcpi_parm is valid, then there is a read_check error
3773 fast_path_evt = lpfc_alloc_fast_evt(phba);
3774 if (!fast_path_evt)
3775 return;
3776 fast_path_evt->un.read_check_error.header.event_type =
3777 FC_REG_FABRIC_EVENT;
3778 fast_path_evt->un.read_check_error.header.subcategory =
3779 LPFC_EVENT_FCPRDCHKERR;
3780 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3781 &pnode->nlp_portname, sizeof(struct lpfc_name));
3782 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3783 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3784 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3785 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3786 fast_path_evt->un.read_check_error.fcpiparam =
3787 fcpi_parm;
3788 } else
3789 return;
3791 fast_path_evt->vport = vport;
3792 spin_lock_irqsave(&phba->hbalock, flags);
3793 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3794 spin_unlock_irqrestore(&phba->hbalock, flags);
3795 lpfc_worker_wake_up(phba);
3796 return;
3800 * lpfc_scsi_unprep_dma_buf - Un-map the DMA mapping of the SG-list for a device
3801 * @phba: The HBA for which this call is being executed.
3802 * @psb: The scsi buffer which is going to be un-mapped.
3804 * This routine un-maps the DMA of the scatter-gather lists of the scsi
3805 * command held in @psb.
3807 static void
3808 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3811 * There are only two special cases to consider. (1) the scsi command
3812 * requested scatter-gather usage or (2) the scsi command allocated
3813 * a request buffer, but did not request use_sg. There is a third
3814 * case, but it does not require resource deallocation.
3816 if (psb->seg_cnt > 0)
3817 scsi_dma_unmap(psb->pCmd);
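/* scsi_dma_unmap() only covers the data s/g list; the DIF
 * protection s/g list must be unmapped separately below.
 */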
3818 if (psb->prot_seg_cnt > 0)
3819 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3820 scsi_prot_sg_count(psb->pCmd),
3821 psb->pCmd->sc_data_direction);
3825 * lpfc_handle_fcp_err - FCP response handler
3826 * @vport: The virtual port for which this call is being executed.
3827 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3828 * @fcpi_parm: FCP initiator parameter reported by the HBA with the error.
3830 * This routine is called to process response IOCB with status field
3831 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3832 * based upon SCSI and FCP error.
3834 static void
3835 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3836 uint32_t fcpi_parm)
3838 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3839 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3840 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3841 uint32_t resp_info = fcprsp->rspStatus2;
3842 uint32_t scsi_status = fcprsp->rspStatus3;
3843 uint32_t *lp;
3844 uint32_t host_status = DID_OK;
3845 uint32_t rsplen = 0;
3846 uint32_t fcpDl;
3847 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3851 * If this is a task management command, there is no
3852 * scsi packet associated with this lpfc_cmd. The driver
3853 * consumes it.
3855 if (fcpcmd->fcpCntl2) {
3856 scsi_status = 0;
3857 goto out;
3860 if (resp_info & RSP_LEN_VALID) {
3861 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3862 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3863 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3864 "2719 Invalid response length: "
3865 "tgt x%x lun x%llx cmnd x%x rsplen "
3866 "x%x\n", cmnd->device->id,
3867 cmnd->device->lun, cmnd->cmnd[0],
3868 rsplen);
3869 host_status = DID_ERROR;
3870 goto out;
3872 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3873 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3874 "2757 Protocol failure detected during "
3875 "processing of FCP I/O op: "
3876 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3877 cmnd->device->id,
3878 cmnd->device->lun, cmnd->cmnd[0],
3879 fcprsp->rspInfo3);
3880 host_status = DID_ERROR;
3881 goto out;
3885 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3886 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3887 if (snslen > SCSI_SENSE_BUFFERSIZE)
3888 snslen = SCSI_SENSE_BUFFERSIZE;
3890 if (resp_info & RSP_LEN_VALID)
3891 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3892 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3894 lp = (uint32_t *)cmnd->sense_buffer;
3896 /* special handling for underrun conditions */
3897 if (!scsi_status && (resp_info & RESID_UNDER)) {
3898 /* don't log underruns if LOG_FCP is set... */
3899 if (vport->cfg_log_verbose & LOG_FCP)
3900 logit = LOG_FCP_ERROR;
3901 /* unless operator says so */
3902 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3903 logit = LOG_FCP_UNDER;
3906 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3907 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3908 "Data: x%x x%x x%x x%x x%x\n",
3909 cmnd->cmnd[0], scsi_status,
3910 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3911 be32_to_cpu(fcprsp->rspResId),
3912 be32_to_cpu(fcprsp->rspSnsLen),
3913 be32_to_cpu(fcprsp->rspRspLen),
3914 fcprsp->rspInfo3);
3916 scsi_set_resid(cmnd, 0);
3917 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3918 if (resp_info & RESID_UNDER) {
3919 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3921 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3922 "9025 FCP Underrun, expected %d, "
3923 "residual %d Data: x%x x%x x%x\n",
3924 fcpDl,
3925 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3926 cmnd->underflow);
3929 * If there is an underrun, check whether the underrun reported by
3930 * the storage array matches the underrun reported by the HBA.
3931 * If they do not match, a frame was dropped.
3933 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3934 lpfc_printf_vlog(vport, KERN_WARNING,
3935 LOG_FCP | LOG_FCP_ERROR,
3936 "9026 FCP Read Check Error "
3937 "and Underrun Data: x%x x%x x%x x%x\n",
3938 fcpDl,
3939 scsi_get_resid(cmnd), fcpi_parm,
3940 cmnd->cmnd[0]);
3941 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3942 host_status = DID_ERROR;
3945 * The cmnd->underflow is the minimum number of bytes that must
3946 * be transferred for this command. Provided a sense condition
3947 * is not present, make sure the actual amount transferred is at
3948 * least the underflow value or fail.
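 * For example: bufflen 0x1000, resid 0x800 and underflow 0x1000
 * gives 0x1000 - 0x800 = 0x800 bytes transferred, short of the
 * minimum, so the command is failed with DID_ERROR.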
3950 if (!(resp_info & SNS_LEN_VALID) &&
3951 (scsi_status == SAM_STAT_GOOD) &&
3952 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3953 < cmnd->underflow)) {
3954 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3955 "9027 FCP command x%x residual "
3956 "underrun converted to error "
3957 "Data: x%x x%x x%x\n",
3958 cmnd->cmnd[0], scsi_bufflen(cmnd),
3959 scsi_get_resid(cmnd), cmnd->underflow);
3960 host_status = DID_ERROR;
3962 } else if (resp_info & RESID_OVER) {
3963 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3964 "9028 FCP command x%x residual overrun error. "
3965 "Data: x%x x%x\n", cmnd->cmnd[0],
3966 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3967 host_status = DID_ERROR;
3970 * Check SLI validation that the entire transfer was actually done
3971 * (fcpi_parm should be zero).
3973 } else if (fcpi_parm) {
3974 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3975 "9029 FCP %s Check Error Data: "
3976 "x%x x%x x%x x%x x%x\n",
3977 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3978 "Read" : "Write"),
3979 fcpDl, be32_to_cpu(fcprsp->rspResId),
3980 fcpi_parm, cmnd->cmnd[0], scsi_status);
3982 /* There is some issue with the LPe12000 that causes it
3983 * to miscalculate the fcpi_parm and falsely trip this
3984 * recovery logic. Detect this case and don't error when true.
3986 if (fcpi_parm > fcpDl)
3987 goto out;
3989 switch (scsi_status) {
3990 case SAM_STAT_GOOD:
3991 case SAM_STAT_CHECK_CONDITION:
3992 /* Fabric dropped a data frame. Fail any successful
3993 * command in which we detected dropped frames.
3994 * A status of good or some check conditions could
3995 * be considered a successful command.
3997 host_status = DID_ERROR;
3998 break;
4000 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4003 out:
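/* Compose the midlayer result: host byte in bits 23:16, SCSI
 * status byte in bits 7:0.
 */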
4004 cmnd->result = host_status << 16 | scsi_status;
4005 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4009 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4010 * @phba: The hba for which this call is being executed.
4011 * @pwqeIn: The command WQE for the scsi cmnd.
4012 * @wcqe: Pointer to the completion WCQE for the scsi cmnd.
4014 * This routine assigns scsi command result by looking into response WQE
4015 * status field appropriately. This routine handles QUEUE FULL condition as
4016 * well by ramping down device queue depth.
4018 static void
4019 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4020 struct lpfc_wcqe_complete *wcqe)
4022 struct lpfc_io_buf *lpfc_cmd =
4023 (struct lpfc_io_buf *)pwqeIn->context1;
4024 struct lpfc_vport *vport = pwqeIn->vport;
4025 struct lpfc_rport_data *rdata;
4026 struct lpfc_nodelist *ndlp;
4027 struct scsi_cmnd *cmd;
4028 unsigned long flags;
4029 struct lpfc_fast_path_event *fast_path_evt;
4030 struct Scsi_Host *shost;
4031 u32 logit = LOG_FCP;
4032 u32 status, idx;
4033 unsigned long iflags = 0;
4035 /* Sanity check on return of outstanding command */
4036 if (!lpfc_cmd) {
4037 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4038 "9032 Null lpfc_cmd pointer. No "
4039 "release, skip completion\n");
4040 return;
4043 rdata = lpfc_cmd->rdata;
4044 ndlp = rdata->pnode;
4046 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4047 /* TOREMOVE - currently this flag is checked during
4048 * the release of lpfc_iocbq. Remove once we move
4049 * to lpfc_wqe_job construct.
4051 * This needs to be done outside buf_lock
4053 spin_lock_irqsave(&phba->hbalock, iflags);
4054 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
4055 spin_unlock_irqrestore(&phba->hbalock, iflags);
4058 /* Guard against abort handler being called at same time */
4059 spin_lock(&lpfc_cmd->buf_lock);
4061 /* Sanity check on return of outstanding command */
4062 cmd = lpfc_cmd->pCmd;
4063 if (!cmd || !phba) {
4064 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4065 "9042 I/O completion: Not an active IO\n");
4066 spin_unlock(&lpfc_cmd->buf_lock);
4067 lpfc_release_scsi_buf(phba, lpfc_cmd);
4068 return;
4070 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4071 if (phba->sli4_hba.hdwq)
4072 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4074 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4075 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4076 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4077 #endif
4078 shost = cmd->device->host;
4080 status = bf_get(lpfc_wcqe_c_status, wcqe);
4081 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4082 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4084 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4085 if (bf_get(lpfc_wcqe_c_xb, wcqe))
4086 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4088 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4089 if (lpfc_cmd->prot_data_type) {
4090 struct scsi_dif_tuple *src = NULL;
4092 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4094 * Used to restore any changes to protection
4095 * data for error injection.
4097 switch (lpfc_cmd->prot_data_type) {
4098 case LPFC_INJERR_REFTAG:
4099 src->ref_tag =
4100 lpfc_cmd->prot_data;
4101 break;
4102 case LPFC_INJERR_APPTAG:
4103 src->app_tag =
4104 (uint16_t)lpfc_cmd->prot_data;
4105 break;
4106 case LPFC_INJERR_GUARD:
4107 src->guard_tag =
4108 (uint16_t)lpfc_cmd->prot_data;
4109 break;
4110 default:
4111 break;
4114 lpfc_cmd->prot_data = 0;
4115 lpfc_cmd->prot_data_type = 0;
4116 lpfc_cmd->prot_data_segment = NULL;
4118 #endif
4119 if (unlikely(lpfc_cmd->status)) {
4120 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4121 (lpfc_cmd->result & IOERR_DRVR_MASK))
4122 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4123 else if (lpfc_cmd->status >= IOSTAT_CNT)
4124 lpfc_cmd->status = IOSTAT_DEFAULT;
4125 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4126 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4127 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4128 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4129 logit = 0;
4130 else
4131 logit = LOG_FCP | LOG_FCP_UNDER;
4132 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4133 "9034 FCP cmd x%x failed <%d/%lld> "
4134 "status: x%x result: x%x "
4135 "sid: x%x did: x%x oxid: x%x "
4136 "Data: x%x x%x x%x\n",
4137 cmd->cmnd[0],
4138 cmd->device ? cmd->device->id : 0xffff,
4139 cmd->device ? cmd->device->lun : 0xffff,
4140 lpfc_cmd->status, lpfc_cmd->result,
4141 vport->fc_myDID,
4142 (ndlp) ? ndlp->nlp_DID : 0,
4143 lpfc_cmd->cur_iocbq.sli4_xritag,
4144 wcqe->parameter, wcqe->total_data_placed,
4145 lpfc_cmd->cur_iocbq.iotag);
4148 switch (lpfc_cmd->status) {
4149 case IOSTAT_SUCCESS:
4150 cmd->result = DID_OK << 16;
4151 break;
4152 case IOSTAT_FCP_RSP_ERROR:
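/* The WCQE carries no fcpi_parm; derive its equivalent from the
 * WQE's total transfer length minus the bytes the HBA placed.
 */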
4153 lpfc_handle_fcp_err(vport, lpfc_cmd,
4154 pwqeIn->wqe.fcp_iread.total_xfer_len -
4155 wcqe->total_data_placed);
4156 break;
4157 case IOSTAT_NPORT_BSY:
4158 case IOSTAT_FABRIC_BSY:
4159 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4160 fast_path_evt = lpfc_alloc_fast_evt(phba);
4161 if (!fast_path_evt)
4162 break;
4163 fast_path_evt->un.fabric_evt.event_type =
4164 FC_REG_FABRIC_EVENT;
4165 fast_path_evt->un.fabric_evt.subcategory =
4166 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4167 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4168 if (ndlp) {
4169 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4170 &ndlp->nlp_portname,
4171 sizeof(struct lpfc_name));
4172 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4173 &ndlp->nlp_nodename,
4174 sizeof(struct lpfc_name));
4176 fast_path_evt->vport = vport;
4177 fast_path_evt->work_evt.evt =
4178 LPFC_EVT_FASTPATH_MGMT_EVT;
4179 spin_lock_irqsave(&phba->hbalock, flags);
4180 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4181 &phba->work_list);
4182 spin_unlock_irqrestore(&phba->hbalock, flags);
4183 lpfc_worker_wake_up(phba);
4184 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4185 "9035 Fabric/Node busy FCP cmd x%x failed"
4186 " <%d/%lld> "
4187 "status: x%x result: x%x "
4188 "sid: x%x did: x%x oxid: x%x "
4189 "Data: x%x x%x x%x\n",
4190 cmd->cmnd[0],
4191 cmd->device ? cmd->device->id : 0xffff,
4192 cmd->device ? cmd->device->lun : 0xffff,
4193 lpfc_cmd->status, lpfc_cmd->result,
4194 vport->fc_myDID,
4195 (ndlp) ? ndlp->nlp_DID : 0,
4196 lpfc_cmd->cur_iocbq.sli4_xritag,
4197 wcqe->parameter,
4198 wcqe->total_data_placed,
4199 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4200 break;
4201 case IOSTAT_REMOTE_STOP:
4202 if (ndlp) {
4203 /* This I/O was aborted by the target; we don't
4204 * know the rxid, and because we did not send the
4205 * ABTS we cannot generate an RRQ.
4207 lpfc_set_rrq_active(phba, ndlp,
4208 lpfc_cmd->cur_iocbq.sli4_lxritag,
4209 0, 0);
4211 fallthrough;
4212 case IOSTAT_LOCAL_REJECT:
4213 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4214 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4215 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4216 lpfc_cmd->result ==
4217 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4218 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4219 lpfc_cmd->result ==
4220 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4221 cmd->result = DID_NO_CONNECT << 16;
4222 break;
4224 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4225 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4226 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4227 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4228 cmd->result = DID_REQUEUE << 16;
4229 break;
4231 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4232 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4233 status == CQE_STATUS_DI_ERROR) {
4234 if (scsi_get_prot_op(cmd) !=
4235 SCSI_PROT_NORMAL) {
4237 * This is a response for a BG enabled
4238 * cmd. Parse BG error
4240 lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
4241 wcqe);
4242 break;
4244 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4245 "9040 non-zero BGSTAT on unprotected cmd\n");
4247 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4248 "9036 Local Reject FCP cmd x%x failed"
4249 " <%d/%lld> "
4250 "status: x%x result: x%x "
4251 "sid: x%x did: x%x oxid: x%x "
4252 "Data: x%x x%x x%x\n",
4253 cmd->cmnd[0],
4254 cmd->device ? cmd->device->id : 0xffff,
4255 cmd->device ? cmd->device->lun : 0xffff,
4256 lpfc_cmd->status, lpfc_cmd->result,
4257 vport->fc_myDID,
4258 (ndlp) ? ndlp->nlp_DID : 0,
4259 lpfc_cmd->cur_iocbq.sli4_xritag,
4260 wcqe->parameter,
4261 wcqe->total_data_placed,
4262 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4263 fallthrough;
4264 default:
4265 if (lpfc_cmd->status >= IOSTAT_CNT)
4266 lpfc_cmd->status = IOSTAT_DEFAULT;
4267 cmd->result = DID_ERROR << 16;
4268 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4269 "9037 FCP Completion Error: xri %x "
4270 "status x%x result x%x [x%x] "
4271 "placed x%x\n",
4272 lpfc_cmd->cur_iocbq.sli4_xritag,
4273 lpfc_cmd->status, lpfc_cmd->result,
4274 wcqe->parameter,
4275 wcqe->total_data_placed);
4277 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4278 u32 *lp = (u32 *)cmd->sense_buffer;
4280 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4281 "9039 Iodone <%d/%llu> cmd x%p, error "
4282 "x%x SNS x%x x%x Data: x%x x%x\n",
4283 cmd->device->id, cmd->device->lun, cmd,
4284 cmd->result, *lp, *(lp + 3), cmd->retries,
4285 scsi_get_resid(cmd));
4288 lpfc_update_stats(vport, lpfc_cmd);
4290 if (vport->cfg_max_scsicmpl_time &&
4291 time_after(jiffies, lpfc_cmd->start_time +
4292 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4293 spin_lock_irqsave(shost->host_lock, flags);
4294 if (ndlp) {
4295 if (ndlp->cmd_qdepth >
4296 atomic_read(&ndlp->cmd_pending) &&
4297 (atomic_read(&ndlp->cmd_pending) >
4298 LPFC_MIN_TGT_QDEPTH) &&
4299 (cmd->cmnd[0] == READ_10 ||
4300 cmd->cmnd[0] == WRITE_10))
4301 ndlp->cmd_qdepth =
4302 atomic_read(&ndlp->cmd_pending);
4304 ndlp->last_change_time = jiffies;
4306 spin_unlock_irqrestore(shost->host_lock, flags);
4308 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4310 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4311 if (lpfc_cmd->ts_cmd_start) {
4312 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4313 lpfc_cmd->ts_data_io = ktime_get_ns();
4314 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4315 lpfc_io_ktime(phba, lpfc_cmd);
4317 #endif
4318 lpfc_cmd->pCmd = NULL;
4319 spin_unlock(&lpfc_cmd->buf_lock);
4321 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4322 cmd->scsi_done(cmd);
4325 * If there is an abort thread waiting for command completion
4326 * wake up the thread.
4328 spin_lock(&lpfc_cmd->buf_lock);
4329 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4330 if (lpfc_cmd->waitq)
4331 wake_up(lpfc_cmd->waitq);
4332 spin_unlock(&lpfc_cmd->buf_lock);
4334 lpfc_release_scsi_buf(phba, lpfc_cmd);
4338 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4339 * @phba: The Hba for which this call is being executed.
4340 * @pIocbIn: The command IOCBQ for the scsi cmnd.
4341 * @pIocbOut: The response IOCBQ for the scsi cmnd.
4343 * This routine assigns scsi command result by looking into response IOCB
4344 * status field appropriately. This routine handles QUEUE FULL condition as
4345 * well by ramping down device queue depth.
4347 static void
4348 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4349 struct lpfc_iocbq *pIocbOut)
4351 struct lpfc_io_buf *lpfc_cmd =
4352 (struct lpfc_io_buf *) pIocbIn->context1;
4353 struct lpfc_vport *vport = pIocbIn->vport;
4354 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4355 struct lpfc_nodelist *pnode = rdata->pnode;
4356 struct scsi_cmnd *cmd;
4357 unsigned long flags;
4358 struct lpfc_fast_path_event *fast_path_evt;
4359 struct Scsi_Host *shost;
4360 int idx;
4361 uint32_t logit = LOG_FCP;
4363 /* Guard against abort handler being called at same time */
4364 spin_lock(&lpfc_cmd->buf_lock);
4366 /* Sanity check on return of outstanding command */
4367 cmd = lpfc_cmd->pCmd;
4368 if (!cmd || !phba) {
4369 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4370 "2621 IO completion: Not an active IO\n");
4371 spin_unlock(&lpfc_cmd->buf_lock);
4372 return;
4375 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4376 if (phba->sli4_hba.hdwq)
4377 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4379 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4380 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4381 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4382 #endif
4383 shost = cmd->device->host;
4385 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4386 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4387 /* pick up SLI4 exchange busy status from HBA */
4388 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
4389 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4390 else
4391 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4393 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4394 if (lpfc_cmd->prot_data_type) {
4395 struct scsi_dif_tuple *src = NULL;
4397 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4399 * Used to restore any changes to protection
4400 * data for error injection.
4402 switch (lpfc_cmd->prot_data_type) {
4403 case LPFC_INJERR_REFTAG:
4404 src->ref_tag =
4405 lpfc_cmd->prot_data;
4406 break;
4407 case LPFC_INJERR_APPTAG:
4408 src->app_tag =
4409 (uint16_t)lpfc_cmd->prot_data;
4410 break;
4411 case LPFC_INJERR_GUARD:
4412 src->guard_tag =
4413 (uint16_t)lpfc_cmd->prot_data;
4414 break;
4415 default:
4416 break;
4419 lpfc_cmd->prot_data = 0;
4420 lpfc_cmd->prot_data_type = 0;
4421 lpfc_cmd->prot_data_segment = NULL;
4423 #endif
4425 if (unlikely(lpfc_cmd->status)) {
4426 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4427 (lpfc_cmd->result & IOERR_DRVR_MASK))
4428 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4429 else if (lpfc_cmd->status >= IOSTAT_CNT)
4430 lpfc_cmd->status = IOSTAT_DEFAULT;
4431 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4432 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4433 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4434 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4435 logit = 0;
4436 else
4437 logit = LOG_FCP | LOG_FCP_UNDER;
4438 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4439 "9030 FCP cmd x%x failed <%d/%lld> "
4440 "status: x%x result: x%x "
4441 "sid: x%x did: x%x oxid: x%x "
4442 "Data: x%x x%x\n",
4443 cmd->cmnd[0],
4444 cmd->device ? cmd->device->id : 0xffff,
4445 cmd->device ? cmd->device->lun : 0xffff,
4446 lpfc_cmd->status, lpfc_cmd->result,
4447 vport->fc_myDID,
4448 (pnode) ? pnode->nlp_DID : 0,
4449 phba->sli_rev == LPFC_SLI_REV4 ?
4450 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4451 pIocbOut->iocb.ulpContext,
4452 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4454 switch (lpfc_cmd->status) {
4455 case IOSTAT_FCP_RSP_ERROR:
4456 /* Call FCP RSP handler to determine result */
4457 lpfc_handle_fcp_err(vport, lpfc_cmd,
4458 pIocbOut->iocb.un.fcpi.fcpi_parm);
4459 break;
4460 case IOSTAT_NPORT_BSY:
4461 case IOSTAT_FABRIC_BSY:
4462 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4463 fast_path_evt = lpfc_alloc_fast_evt(phba);
4464 if (!fast_path_evt)
4465 break;
4466 fast_path_evt->un.fabric_evt.event_type =
4467 FC_REG_FABRIC_EVENT;
4468 fast_path_evt->un.fabric_evt.subcategory =
4469 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4470 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4471 if (pnode) {
4472 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4473 &pnode->nlp_portname,
4474 sizeof(struct lpfc_name));
4475 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4476 &pnode->nlp_nodename,
4477 sizeof(struct lpfc_name));
4479 fast_path_evt->vport = vport;
4480 fast_path_evt->work_evt.evt =
4481 LPFC_EVT_FASTPATH_MGMT_EVT;
4482 spin_lock_irqsave(&phba->hbalock, flags);
4483 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4484 &phba->work_list);
4485 spin_unlock_irqrestore(&phba->hbalock, flags);
4486 lpfc_worker_wake_up(phba);
4487 break;
4488 case IOSTAT_LOCAL_REJECT:
4489 case IOSTAT_REMOTE_STOP:
4490 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4491 lpfc_cmd->result ==
4492 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4493 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4494 lpfc_cmd->result ==
4495 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4496 cmd->result = DID_NO_CONNECT << 16;
4497 break;
4499 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4500 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4501 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4502 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4503 cmd->result = DID_REQUEUE << 16;
4504 break;
4506 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4507 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4508 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4509 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4511 * This is a response for a BG enabled
4512 * cmd. Parse BG error
4514 lpfc_parse_bg_err(phba, lpfc_cmd,
4515 pIocbOut);
4516 break;
4517 } else {
4518 lpfc_printf_vlog(vport, KERN_WARNING,
4519 LOG_BG,
4520 "9031 non-zero BGSTAT "
4521 "on unprotected cmd\n");
4524 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4525 && (phba->sli_rev == LPFC_SLI_REV4)
4526 && pnode) {
4527 /* This IO was aborted by the target; we don't
4528 * know the rxid, and because we did not send the
4529 * ABTS we cannot generate an RRQ.
4531 lpfc_set_rrq_active(phba, pnode,
4532 lpfc_cmd->cur_iocbq.sli4_lxritag,
4533 0, 0);
4535 fallthrough;
4536 default:
4537 cmd->result = DID_ERROR << 16;
4538 break;
4541 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4542 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4543 SAM_STAT_BUSY;
4544 } else
4545 cmd->result = DID_OK << 16;
4547 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4548 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4550 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4551 "0710 Iodone <%d/%llu> cmd x%px, error "
4552 "x%x SNS x%x x%x Data: x%x x%x\n",
4553 cmd->device->id, cmd->device->lun, cmd,
4554 cmd->result, *lp, *(lp + 3), cmd->retries,
4555 scsi_get_resid(cmd));
4558 lpfc_update_stats(vport, lpfc_cmd);
4559 if (vport->cfg_max_scsicmpl_time &&
4560 time_after(jiffies, lpfc_cmd->start_time +
4561 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4562 spin_lock_irqsave(shost->host_lock, flags);
4563 if (pnode) {
4564 if (pnode->cmd_qdepth >
4565 atomic_read(&pnode->cmd_pending) &&
4566 (atomic_read(&pnode->cmd_pending) >
4567 LPFC_MIN_TGT_QDEPTH) &&
4568 ((cmd->cmnd[0] == READ_10) ||
4569 (cmd->cmnd[0] == WRITE_10)))
4570 pnode->cmd_qdepth =
4571 atomic_read(&pnode->cmd_pending);
4573 pnode->last_change_time = jiffies;
4575 spin_unlock_irqrestore(shost->host_lock, flags);
4577 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4579 lpfc_cmd->pCmd = NULL;
4580 spin_unlock(&lpfc_cmd->buf_lock);
4582 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4583 if (lpfc_cmd->ts_cmd_start) {
4584 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4585 lpfc_cmd->ts_data_io = ktime_get_ns();
4586 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4587 lpfc_io_ktime(phba, lpfc_cmd);
4589 #endif
4590 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4591 cmd->scsi_done(cmd);
4594 * If there is an abort thread waiting for command completion
4595 * wake up the thread.
4597 spin_lock(&lpfc_cmd->buf_lock);
4598 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4599 if (lpfc_cmd->waitq)
4600 wake_up(lpfc_cmd->waitq);
4601 spin_unlock(&lpfc_cmd->buf_lock);
4603 lpfc_release_scsi_buf(phba, lpfc_cmd);
4607 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4608 * @vport: Pointer to the vport object for which I/O is executed
4609 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4610 * @tmo: timeout value for the IO
4612 * Based on the data-direction of the command, initialize IOCB
4613 * in the I/O buffer. Fill in the IOCB fields which are independent
4614 * of the scsi buffer
4616 * RETURNS 0 - SUCCESS,
4618 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4619 struct lpfc_io_buf *lpfc_cmd,
4620 uint8_t tmo)
4622 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4623 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4624 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4625 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4626 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4627 int datadir = scsi_cmnd->sc_data_direction;
4628 u32 fcpdl;
4630 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4633 * There are three possibilities here - use scatter-gather segment, use
4634 * the single mapping, or neither. Start the lpfc command prep by
4635 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4636 * data bde entry.
4638 if (scsi_sg_count(scsi_cmnd)) {
4639 if (datadir == DMA_TO_DEVICE) {
4640 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4641 iocb_cmd->ulpPU = PARM_READ_CHECK;
4642 if (vport->cfg_first_burst_size &&
4643 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4644 u32 xrdy_len;
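/* Cap the initial write burst (XFER_RDY) at the
 * configured first burst size.
 */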
4646 fcpdl = scsi_bufflen(scsi_cmnd);
4647 xrdy_len = min(fcpdl,
4648 vport->cfg_first_burst_size);
4649 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4651 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4652 } else {
4653 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4654 iocb_cmd->ulpPU = PARM_READ_CHECK;
4655 fcp_cmnd->fcpCntl3 = READ_DATA;
4657 } else {
4658 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4659 iocb_cmd->un.fcpi.fcpi_parm = 0;
4660 iocb_cmd->ulpPU = 0;
4661 fcp_cmnd->fcpCntl3 = 0;
4665 * Finish initializing those IOCB fields that are independent
4666 * of the scsi_cmnd request_buffer
4668 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4669 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4670 piocbq->iocb.ulpFCP2Rcvy = 1;
4671 else
4672 piocbq->iocb.ulpFCP2Rcvy = 0;
4674 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4675 piocbq->context1 = lpfc_cmd;
4676 if (!piocbq->iocb_cmpl)
4677 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4678 piocbq->iocb.ulpTimeout = tmo;
4679 piocbq->vport = vport;
4680 return 0;
4684 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4685 * @vport: Pointer to the vport object for which I/O is executed
4686 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4687 * @tmo: timeout value for the IO
4689 * Based on the data-direction of the command copy WQE template
4690 * to I/O buffer WQE. Fill in the WQE fields which are independent
4691 * of the scsi buffer
4693 * RETURNS 0 - SUCCESS,
4695 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4696 struct lpfc_io_buf *lpfc_cmd,
4697 uint8_t tmo)
4699 struct lpfc_hba *phba = vport->phba;
4700 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4701 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4702 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4703 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4704 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4705 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4706 u16 idx = lpfc_cmd->hdwq_no;
4707 int datadir = scsi_cmnd->sc_data_direction;
4709 hdwq = &phba->sli4_hba.hdwq[idx];
4711 /* Zero the full 128-byte WQE before filling it in */
4712 memset(wqe, 0, sizeof(union lpfc_wqe128));
4715 * There are three possibilities here - use scatter-gather segment, use
4716 * the single mapping, or neither.
4718 if (scsi_sg_count(scsi_cmnd)) {
4719 if (datadir == DMA_TO_DEVICE) {
4720 /* From the iwrite template, initialize words 7 - 11 */
4721 memcpy(&wqe->words[7],
4722 &lpfc_iwrite_cmd_template.words[7],
4723 sizeof(uint32_t) * 5);
4725 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4726 if (hdwq)
4727 hdwq->scsi_cstat.output_requests++;
4728 } else {
4729 /* From the iread template, initialize words 7 - 11 */
4730 memcpy(&wqe->words[7],
4731 &lpfc_iread_cmd_template.words[7],
4732 sizeof(uint32_t) * 5);
4734 /* Word 7 */
4735 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4737 fcp_cmnd->fcpCntl3 = READ_DATA;
4738 if (hdwq)
4739 hdwq->scsi_cstat.input_requests++;
4741 } else {
4742 /* From the icmnd template, initialize words 4 - 11 */
4743 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4744 sizeof(uint32_t) * 8);
4746 /* Word 7 */
4747 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4749 fcp_cmnd->fcpCntl3 = 0;
4750 if (hdwq)
4751 hdwq->scsi_cstat.control_requests++;
4755 * Finish initializing those WQE fields that are independent
4756 * of the request_buffer
4759 /* Word 3 */
4760 bf_set(payload_offset_len, &wqe->fcp_icmd,
4761 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4763 /* Word 6 */
4764 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4765 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4766 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4768 /* Word 7*/
4769 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4770 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4772 bf_set(wqe_class, &wqe->generic.wqe_com,
4773 (pnode->nlp_fcp_info & 0x0f));
4775 /* Word 8 */
4776 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4778 /* Word 9 */
4779 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4781 pwqeq->vport = vport;
4783 pwqeq->context1 = lpfc_cmd;
4784 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4785 pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4787 return 0;
4791 * lpfc_scsi_prep_cmnd - Wrapper function to convert a scsi cmnd to an FCP info unit
4792 * @vport: The virtual port for which this call is being executed.
4793 * @lpfc_cmd: The scsi command which needs to send.
4794 * @pnode: Pointer to lpfc_nodelist.
4796 * This routine initializes the fcp_cmnd and iocb/wqe data structures from
4797 * the scsi command and calls the SLI-revision-specific prep-buffer routine.
4799 static int
4800 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4801 struct lpfc_nodelist *pnode)
4803 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4804 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4805 u8 *ptr;
4807 if (!pnode)
4808 return 0;
4810 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4811 /* clear task management bits */
4812 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4814 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4815 &lpfc_cmd->fcp_cmnd->fcp_lun);
4817 ptr = &fcp_cmnd->fcpCdb[0];
4818 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4819 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4820 ptr += scsi_cmnd->cmd_len;
4821 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4824 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4826 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4828 return 0;
4832 * lpfc_scsi_prep_task_mgmt_cmd - Convert a scsi TM cmd to an FCP info unit
4833 * @vport: The virtual port for which this call is being executed.
4834 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4835 * @lun: Logical unit number.
4836 * @task_mgmt_cmd: SCSI task management command.
4838 * This routine creates an FCP information unit corresponding to
4839 * @task_mgmt_cmd for the device.
4841 * Return codes:
4842 * 0 - Error
4843 * 1 - Success
4845 static int
4846 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4847 struct lpfc_io_buf *lpfc_cmd,
4848 uint64_t lun,
4849 uint8_t task_mgmt_cmd)
4851 struct lpfc_iocbq *piocbq;
4852 IOCB_t *piocb;
4853 struct fcp_cmnd *fcp_cmnd;
4854 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4855 struct lpfc_nodelist *ndlp = rdata->pnode;
4857 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4858 return 0;
4860 piocbq = &(lpfc_cmd->cur_iocbq);
4861 piocbq->vport = vport;
4863 piocb = &piocbq->iocb;
4865 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4866 /* Clear out any old data in the FCP command area */
4867 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4868 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4869 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
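/* For SLI-3 without BlockGuard, the FCP_CMND payload is copied
 * directly into the extended IOCB.
 */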
4870 if (vport->phba->sli_rev == 3 &&
4871 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4872 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4873 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4874 piocb->ulpContext = ndlp->nlp_rpi;
4875 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4876 piocb->ulpContext =
4877 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4879 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4880 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4881 piocb->ulpPU = 0;
4882 piocb->un.fcpi.fcpi_parm = 0;
4884 /* ulpTimeout is only one byte */
4885 if (lpfc_cmd->timeout > 0xff) {
4887 * Do not timeout the command at the firmware level.
4888 * The driver will provide the timeout mechanism.
4890 piocb->ulpTimeout = 0;
4891 } else
4892 piocb->ulpTimeout = lpfc_cmd->timeout;
4894 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4895 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4897 return 1;
4901 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4902 * @phba: The hba struct for which this call is being executed.
4903 * @dev_grp: The HBA PCI-Device group number.
4905 * This routine sets up the SCSI interface API function jump table in @phba
4906 * struct.
4907 * Returns: 0 - success, -ENODEV - failure.
4910 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4913 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4915 switch (dev_grp) {
4916 case LPFC_PCI_DEV_LP:
4917 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4918 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4919 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4920 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4921 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4922 break;
4923 case LPFC_PCI_DEV_OC:
4924 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4925 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4926 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4927 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4928 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
4929 break;
4930 default:
4931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4932 "1418 Invalid HBA PCI-device group: 0x%x\n",
4933 dev_grp);
4934 return -ENODEV;
4936 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4937 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4938 return 0;
4942 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4943 * @phba: The Hba for which this call is being executed.
4944 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4945 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4947 * This routine is the IOCB completion routine for the device reset and
4948 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
4950 static void
4951 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4952 struct lpfc_iocbq *cmdiocbq,
4953 struct lpfc_iocbq *rspiocbq)
4955 struct lpfc_io_buf *lpfc_cmd =
4956 (struct lpfc_io_buf *) cmdiocbq->context1;
4957 if (lpfc_cmd)
4958 lpfc_release_scsi_buf(phba, lpfc_cmd);
4959 return;
4963 * lpfc_check_pci_resettable - Walk the devices on the pci_dev's bus to check
4964 * whether issuing a pci bus reset would be unsafe
4965 * @phba: lpfc_hba pointer.
4967 * Description:
4968 * Walks the bus_list to ensure that only PCI devices with the Emulex
4969 * vendor id, device ids that support hot reset, and at most one
4970 * occurrence of function 0 are present.
4972 * Returns:
4973 * -EBADSLT, detected invalid device
4974 * 0, successful
4977 lpfc_check_pci_resettable(struct lpfc_hba *phba)
4979 const struct pci_dev *pdev = phba->pcidev;
4980 struct pci_dev *ptr = NULL;
4981 u8 counter = 0;
4983 /* Walk the list of devices on the pci_dev's bus */
4984 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4985 /* Check for Emulex Vendor ID */
4986 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4987 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4988 "8346 Non-Emulex vendor found: "
4989 "0x%04x\n", ptr->vendor);
4990 return -EBADSLT;
4993 /* Check for valid Emulex Device ID */
4994 switch (ptr->device) {
4995 case PCI_DEVICE_ID_LANCER_FC:
4996 case PCI_DEVICE_ID_LANCER_G6_FC:
4997 case PCI_DEVICE_ID_LANCER_G7_FC:
4998 break;
4999 default:
5000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5001 "8347 Invalid device found: "
5002 "0x%04x\n", ptr->device);
5003 return -EBADSLT;
5006 /* Check for only one function 0 ID to ensure only one HBA on
5007 * secondary bus
5009 if (ptr->devfn == 0) {
5010 if (++counter > 1) {
5011 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5012 "8348 More than one device on "
5013 "secondary bus found\n");
5014 return -EBADSLT;
5019 return 0;
5023 * lpfc_info - Info entry point of scsi_host_template data structure
5024 * @host: The scsi host for which this call is being executed.
5026 * This routine provides module information about the hba.
5028 * Return code:
5029 * Pointer to char - Success.
5031 const char *
5032 lpfc_info(struct Scsi_Host *host)
5034 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5035 struct lpfc_hba *phba = vport->phba;
5036 int link_speed = 0;
5037 static char lpfcinfobuf[384];
5038 char tmp[384] = {0};
5040 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5041 if (phba && phba->pcidev){
5042 /* Model Description */
5043 scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
5044 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5045 sizeof(lpfcinfobuf))
5046 goto buffer_done;
5048 /* PCI Info */
5049 scnprintf(tmp, sizeof(tmp),
5050 " on PCI bus %02x device %02x irq %d",
5051 phba->pcidev->bus->number, phba->pcidev->devfn,
5052 phba->pcidev->irq);
5053 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5054 sizeof(lpfcinfobuf))
5055 goto buffer_done;
5057 /* Port Number */
5058 if (phba->Port[0]) {
5059 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5060 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5061 sizeof(lpfcinfobuf))
5062 goto buffer_done;
5065 /* Link Speed */
5066 link_speed = lpfc_sli_port_speed_get(phba);
5067 if (link_speed != 0) {
5068 scnprintf(tmp, sizeof(tmp),
5069 " Logical Link Speed: %d Mbps", link_speed);
5070 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5071 sizeof(lpfcinfobuf))
5072 goto buffer_done;
5075 /* PCI resettable */
5076 if (!lpfc_check_pci_resettable(phba)) {
5077 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5078 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5082 buffer_done:
5083 return lpfcinfobuf;
5087 * lpfc_poll_rearm_timer - Routine to re-arm the fcp_poll timer of the hba
5088 * @phba: The Hba for which this call is being executed.
5090 * This routine re-arms the fcp_poll_timer of @phba using cfg_poll_tmo.
5091 * The default value of cfg_poll_tmo is 10 milliseconds.
5093 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5095 unsigned long poll_tmo_expires =
5096 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5098 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5099 mod_timer(&phba->fcp_poll_timer,
5100 poll_tmo_expires);
5104 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5105 * @phba: The Hba for which this call is being executed.
5107 * This routine starts the fcp_poll_timer of @phba.
5109 void lpfc_poll_start_timer(struct lpfc_hba * phba)
5111 lpfc_poll_rearm_timer(phba);
5115 * lpfc_poll_timeout - Restart polling timer
5116 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5118 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
5119 * and the FCP ring interrupt is disabled.
5121 void lpfc_poll_timeout(struct timer_list *t)
5123 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5125 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5126 lpfc_sli_handle_fast_ring_event(phba,
5127 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5129 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5130 lpfc_poll_rearm_timer(phba);
5135 * lpfc_queuecommand - scsi_host_template queuecommand entry point
5136 * @shost: kernel scsi host pointer.
5137 * @cmnd: Pointer to scsi_cmnd data structure.
5139 * The driver registers this routine with the scsi midlayer to submit a @cmnd
5140 * for processing. This routine prepares an IOCB/WQE from the scsi command and
5141 * provides it to the firmware; scsi_done is invoked when processing completes.
5143 * Return value :
5144 * 0 - Success
5145 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5147 static int
5148 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5150 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5151 struct lpfc_hba *phba = vport->phba;
5152 struct lpfc_rport_data *rdata;
5153 struct lpfc_nodelist *ndlp;
5154 struct lpfc_io_buf *lpfc_cmd;
5155 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5156 int err, idx;
5157 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5158 uint64_t start = 0L;
5160 if (phba->ktime_on)
5161 start = ktime_get_ns();
5162 #endif
5164 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5166 /* sanity check on references */
5167 if (unlikely(!rdata) || unlikely(!rport))
5168 goto out_fail_command;
5170 err = fc_remote_port_chkready(rport);
5171 if (err) {
5172 cmnd->result = err;
5173 goto out_fail_command;
5175 ndlp = rdata->pnode;
5177 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5178 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5181 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5182 " op:%02x str=%s without registering for"
5183 " BlockGuard - Rejecting command\n",
5184 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5185 dif_op_str[scsi_get_prot_op(cmnd)]);
5186 goto out_fail_command;
5190 * Catch race where our node has transitioned, but the
5191 * transport is still transitioning.
5193 if (!ndlp)
5194 goto out_tgt_busy;
5195 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5196 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5197 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5198 "3377 Target Queue Full, scsi Id:%d "
5199 "Qdepth:%d Pending command:%d"
5200 " WWNN:%02x:%02x:%02x:%02x:"
5201 "%02x:%02x:%02x:%02x, "
5202 " WWPN:%02x:%02x:%02x:%02x:"
5203 "%02x:%02x:%02x:%02x",
5204 ndlp->nlp_sid, ndlp->cmd_qdepth,
5205 atomic_read(&ndlp->cmd_pending),
5206 ndlp->nlp_nodename.u.wwn[0],
5207 ndlp->nlp_nodename.u.wwn[1],
5208 ndlp->nlp_nodename.u.wwn[2],
5209 ndlp->nlp_nodename.u.wwn[3],
5210 ndlp->nlp_nodename.u.wwn[4],
5211 ndlp->nlp_nodename.u.wwn[5],
5212 ndlp->nlp_nodename.u.wwn[6],
5213 ndlp->nlp_nodename.u.wwn[7],
5214 ndlp->nlp_portname.u.wwn[0],
5215 ndlp->nlp_portname.u.wwn[1],
5216 ndlp->nlp_portname.u.wwn[2],
5217 ndlp->nlp_portname.u.wwn[3],
5218 ndlp->nlp_portname.u.wwn[4],
5219 ndlp->nlp_portname.u.wwn[5],
5220 ndlp->nlp_portname.u.wwn[6],
5221 ndlp->nlp_portname.u.wwn[7]);
5222 goto out_tgt_busy;
5226 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5227 if (lpfc_cmd == NULL) {
5228 lpfc_rampdown_queue_depth(phba);
5230 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5231 "0707 driver's buffer pool is empty, "
5232 "IO busied\n");
5233 goto out_host_busy;
5237 * Store the midlayer's command structure for the completion phase
5238 * and complete the command initialization.
5240 lpfc_cmd->pCmd = cmnd;
5241 lpfc_cmd->rdata = rdata;
5242 lpfc_cmd->ndlp = ndlp;
5243 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
5244 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5246 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5247 if (err)
5248 goto out_host_busy_release_buf;
5250 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5251 if (vport->phba->cfg_enable_bg) {
5252 lpfc_printf_vlog(vport,
5253 KERN_INFO, LOG_SCSI_CMD,
5254 "9033 BLKGRD: rcvd %s cmd:x%x "
5255 "sector x%llx cnt %u pt %x\n",
5256 dif_op_str[scsi_get_prot_op(cmnd)],
5257 cmnd->cmnd[0],
5258 (unsigned long long)scsi_get_lba(cmnd),
5259 blk_rq_sectors(cmnd->request),
5260 (cmnd->cmnd[1]>>5));
5262 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5263 } else {
5264 if (vport->phba->cfg_enable_bg) {
5265 lpfc_printf_vlog(vport,
5266 KERN_INFO, LOG_SCSI_CMD,
5267 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5268 "x%x sector x%llx cnt %u pt %x\n",
5269 cmnd->cmnd[0],
5270 (unsigned long long)scsi_get_lba(cmnd),
5271 blk_rq_sectors(cmnd->request),
5272 (cmnd->cmnd[1]>>5));
5274 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5277 if (unlikely(err)) {
5278 if (err == 2) {
5279 cmnd->result = DID_ERROR << 16;
5280 goto out_fail_command_release_buf;
5282 goto out_host_busy_free_buf;
5286 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5287 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5288 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5289 #endif
5290 /* Issue I/O to adapter */
5291 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
5292 &lpfc_cmd->cur_iocbq,
5293 SLI_IOCB_RET_IOCB);
5294 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5295 if (start) {
5296 lpfc_cmd->ts_cmd_start = start;
5297 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5298 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5299 } else {
5300 lpfc_cmd->ts_cmd_start = 0;
5302 #endif
5303 if (err) {
5304 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5305 "3376 FCP could not issue IOCB err %x "
5306 "FCP cmd x%x <%d/%llu> "
5307 "sid: x%x did: x%x oxid: x%x "
5308 "Data: x%x x%x x%x x%x\n",
5309 err, cmnd->cmnd[0],
5310 cmnd->device ? cmnd->device->id : 0xffff,
5311 cmnd->device ? cmnd->device->lun : (u64)-1,
5312 vport->fc_myDID, ndlp->nlp_DID,
5313 phba->sli_rev == LPFC_SLI_REV4 ?
5314 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
5315 phba->sli_rev == LPFC_SLI_REV4 ?
5316 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5317 lpfc_cmd->cur_iocbq.iocb.ulpContext,
5318 lpfc_cmd->cur_iocbq.iotag,
5319 phba->sli_rev == LPFC_SLI_REV4 ?
5320 bf_get(wqe_tmo,
5321 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
5322 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
5323 (uint32_t)
5324 (cmnd->request->timeout / 1000));
5326 goto out_host_busy_free_buf;
5329 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5330 lpfc_sli_handle_fast_ring_event(phba,
5331 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5333 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5334 lpfc_poll_rearm_timer(phba);
5337 if (phba->cfg_xri_rebalancing)
5338 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5340 return 0;
5342 out_host_busy_free_buf:
5343 idx = lpfc_cmd->hdwq_no;
5344 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5345 if (phba->sli4_hba.hdwq) {
5346 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5347 case WRITE_DATA:
5348 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5349 break;
5350 case READ_DATA:
5351 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5352 break;
5353 default:
5354 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5357 out_host_busy_release_buf:
5358 lpfc_release_scsi_buf(phba, lpfc_cmd);
5359 out_host_busy:
5360 return SCSI_MLQUEUE_HOST_BUSY;
5362 out_tgt_busy:
5363 return SCSI_MLQUEUE_TARGET_BUSY;
5365 out_fail_command_release_buf:
5366 lpfc_release_scsi_buf(phba, lpfc_cmd);
5368 out_fail_command:
5369 cmnd->scsi_done(cmnd);
5370 return 0;
5375 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5376 * @cmnd: Pointer to scsi_cmnd data structure.
5378 * This routine aborts @cmnd pending in base driver.
5380 * Return code :
5381 * 0x2003 - Error
5382 * 0x2002 - Success
5384 static int
5385 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5387 struct Scsi_Host *shost = cmnd->device->host;
5388 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5389 struct lpfc_hba *phba = vport->phba;
5390 struct lpfc_iocbq *iocb;
5391 struct lpfc_io_buf *lpfc_cmd;
5392 int ret = SUCCESS, status = 0;
5393 struct lpfc_sli_ring *pring_s4 = NULL;
5394 struct lpfc_sli_ring *pring = NULL;
5395 int ret_val;
5396 unsigned long flags;
5397 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5399 status = fc_block_scsi_eh(cmnd);
5400 if (status != 0 && status != SUCCESS)
5401 return status;
5403 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5404 if (!lpfc_cmd)
5405 return ret;
5407 spin_lock_irqsave(&phba->hbalock, flags);
5408 /* driver queued commands are in process of being flushed */
5409 if (phba->hba_flag & HBA_IOQ_FLUSH) {
5410 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5411 "3168 SCSI Layer abort requested I/O has been "
5412 "flushed by LLD.\n");
5413 ret = FAILED;
5414 goto out_unlock;
5417 /* Guard against IO completion being called at same time */
5418 spin_lock(&lpfc_cmd->buf_lock);
5420 if (!lpfc_cmd->pCmd) {
5421 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5422 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5423 "x%x ID %d LUN %llu\n",
5424 SUCCESS, cmnd->device->id, cmnd->device->lun);
5425 goto out_unlock_buf;
5428 iocb = &lpfc_cmd->cur_iocbq;
5429 if (phba->sli_rev == LPFC_SLI_REV4) {
5430 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5431 if (!pring_s4) {
5432 ret = FAILED;
5433 goto out_unlock_buf;
5435 spin_lock(&pring_s4->ring_lock);
5437 /* the command is in process of being cancelled */
5438 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
5439 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5440 "3169 SCSI Layer abort requested I/O has been "
5441 "cancelled by LLD.\n");
5442 ret = FAILED;
5443 goto out_unlock_ring;
5446 * If pCmd field of the corresponding lpfc_io_buf structure
5447 * points to a different SCSI command, then the driver has
5448 * already completed this command, but the midlayer did not
5449 * see the completion before the eh fired. Just return SUCCESS.
5451 if (lpfc_cmd->pCmd != cmnd) {
5452 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5453 "3170 SCSI Layer abort requested I/O has been "
5454 "completed by LLD.\n");
5455 goto out_unlock_ring;
5458 BUG_ON(iocb->context1 != lpfc_cmd);
5460 /* abort issued in recovery is still in progress */
5461 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
5462 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5463 "3389 SCSI Layer I/O Abort Request is pending\n");
5464 if (phba->sli_rev == LPFC_SLI_REV4)
5465 spin_unlock(&pring_s4->ring_lock);
5466 spin_unlock(&lpfc_cmd->buf_lock);
5467 spin_unlock_irqrestore(&phba->hbalock, flags);
5468 goto wait_for_cmpl;
5471 lpfc_cmd->waitq = &waitq;
5472 if (phba->sli_rev == LPFC_SLI_REV4) {
5473 spin_unlock(&pring_s4->ring_lock);
5474 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5475 lpfc_sli4_abort_fcp_cmpl);
5476 } else {
5477 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5478 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5479 lpfc_sli_abort_fcp_cmpl);
5482 if (ret_val != IOCB_SUCCESS) {
5483 /* Indicate the IO is not being aborted by the driver. */
5484 lpfc_cmd->waitq = NULL;
5485 spin_unlock(&lpfc_cmd->buf_lock);
5486 spin_unlock_irqrestore(&phba->hbalock, flags);
5487 ret = FAILED;
5488 goto out;
5491 /* no longer need the lock after this point */
5492 spin_unlock(&lpfc_cmd->buf_lock);
5493 spin_unlock_irqrestore(&phba->hbalock, flags);
5495 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5496 lpfc_sli_handle_fast_ring_event(phba,
5497 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5499 wait_for_cmpl:
5501 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
5502 * for abort to complete.
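 * The wait below is bounded at twice the configured devloss timeout.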
5504 wait_event_timeout(waitq,
5505 (lpfc_cmd->pCmd != cmnd),
5506 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5508 spin_lock(&lpfc_cmd->buf_lock);
5510 if (lpfc_cmd->pCmd == cmnd) {
5511 ret = FAILED;
5512 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5513 "0748 abort handler timed out waiting "
5514 "for aborting I/O (xri:x%x) to complete: "
5515 "ret %#x, ID %d, LUN %llu\n",
5516 iocb->sli4_xritag, ret,
5517 cmnd->device->id, cmnd->device->lun);
5520 lpfc_cmd->waitq = NULL;
5522 spin_unlock(&lpfc_cmd->buf_lock);
5523 goto out;
5525 out_unlock_ring:
5526 if (phba->sli_rev == LPFC_SLI_REV4)
5527 spin_unlock(&pring_s4->ring_lock);
5528 out_unlock_buf:
5529 spin_unlock(&lpfc_cmd->buf_lock);
5530 out_unlock:
5531 spin_unlock_irqrestore(&phba->hbalock, flags);
5532 out:
5533 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5534 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5535 "LUN %llu\n", ret, cmnd->device->id,
5536 cmnd->device->lun);
5537 return ret;
5540 static char *
5541 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5543 switch (task_mgmt_cmd) {
5544 case FCP_ABORT_TASK_SET:
5545 return "ABORT_TASK_SET";
5546 case FCP_CLEAR_TASK_SET:
5547 return "FCP_CLEAR_TASK_SET";
5548 case FCP_BUS_RESET:
5549 return "FCP_BUS_RESET";
5550 case FCP_LUN_RESET:
5551 return "FCP_LUN_RESET";
5552 case FCP_TARGET_RESET:
5553 return "FCP_TARGET_RESET";
5554 case FCP_CLEAR_ACA:
5555 return "FCP_CLEAR_ACA";
5556 case FCP_TERMINATE_TASK:
5557 return "FCP_TERMINATE_TASK";
5558 default:
5559 return "unknown";
5565 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
5566 * @vport: The virtual port for which this call is being executed.
5567 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5569 * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
5571 * Return code :
5572 * 0x2003 - Error
5573 * 0x2002 - Success
5575 static int
5576 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5578 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5579 uint32_t rsp_info;
5580 uint32_t rsp_len;
5581 uint8_t rsp_info_code;
5582 int ret = FAILED;
5585 if (fcprsp == NULL)
5586 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5587 "0703 fcp_rsp is missing\n");
5588 else {
5589 rsp_info = fcprsp->rspStatus2;
5590 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5591 rsp_info_code = fcprsp->rspInfo3;
5594 lpfc_printf_vlog(vport, KERN_INFO,
5595 LOG_FCP,
5596 "0706 fcp_rsp valid 0x%x,"
5597 " rsp len=%d code 0x%x\n",
5598 rsp_info,
5599 rsp_len, rsp_info_code);
5601 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5602 * field specifies the number of valid bytes of FCP_RSP_INFO.
5603 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5605 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5606 ((rsp_len == 8) || (rsp_len == 4))) {
5607 switch (rsp_info_code) {
5608 case RSP_NO_FAILURE:
5609 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5610 "0715 Task Mgmt No Failure\n");
5611 ret = SUCCESS;
5612 break;
5613 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5614 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5615 "0716 Task Mgmt Target "
5616 "reject\n");
5617 break;
5618 case RSP_TM_NOT_COMPLETED: /* TM failed */
5619 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5620 "0717 Task Mgmt Target "
5621 "failed TM\n");
5622 break;
5623 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5624 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5625 "0718 Task Mgmt to invalid "
5626 "LUN\n");
5627 break;
5631 return ret;
5636 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5637 * @vport: The virtual port for which this call is being executed.
5638 * @cmnd: Pointer to scsi_cmnd data structure.
5639 * @tgt_id: Target ID of remote device.
5640 * @lun_id: Lun number for the TMF
5641 * @task_mgmt_cmd: type of TMF to send
5643 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5644 * a remote port.
5646 * Return Code:
5647 * 0x2003 - Error
5648 * 0x2002 - Success.
5650 static int
5651 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5652 unsigned int tgt_id, uint64_t lun_id,
5653 uint8_t task_mgmt_cmd)
5655 struct lpfc_hba *phba = vport->phba;
5656 struct lpfc_io_buf *lpfc_cmd;
5657 struct lpfc_iocbq *iocbq;
5658 struct lpfc_iocbq *iocbqrsp;
5659 struct lpfc_rport_data *rdata;
5660 struct lpfc_nodelist *pnode;
5661 int ret;
5662 int status;
5664 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5665 if (!rdata || !rdata->pnode)
5666 return FAILED;
5667 pnode = rdata->pnode;
5669 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5670 if (lpfc_cmd == NULL)
5671 return FAILED;
5672 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5673 lpfc_cmd->rdata = rdata;
5674 lpfc_cmd->pCmd = cmnd;
5675 lpfc_cmd->ndlp = pnode;
5677 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5678 task_mgmt_cmd);
5679 if (!status) {
5680 lpfc_release_scsi_buf(phba, lpfc_cmd);
5681 return FAILED;
5684 iocbq = &lpfc_cmd->cur_iocbq;
5685 iocbqrsp = lpfc_sli_get_iocbq(phba);
5686 if (iocbqrsp == NULL) {
5687 lpfc_release_scsi_buf(phba, lpfc_cmd);
5688 return FAILED;
5690 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5692 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5693 "0702 Issue %s to TGT %d LUN %llu "
5694 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5695 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5696 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5697 iocbq->iocb_flag);
5699 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5700 iocbq, iocbqrsp, lpfc_cmd->timeout);
5701 if ((status != IOCB_SUCCESS) ||
5702 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5703 if (status != IOCB_SUCCESS ||
5704 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5705 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5706 "0727 TMF %s to TGT %d LUN %llu "
5707 "failed (%d, %d) iocb_flag x%x\n",
5708 lpfc_taskmgmt_name(task_mgmt_cmd),
5709 tgt_id, lun_id,
5710 iocbqrsp->iocb.ulpStatus,
5711 iocbqrsp->iocb.un.ulpWord[4],
5712 iocbq->iocb_flag);
5713 /* if we get here with status == IOCB_SUCCESS, then ulpStatus != IOSTAT_SUCCESS */
5714 if (status == IOCB_SUCCESS) {
5715 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5716 /* Something in the FCP_RSP was invalid.
5717 * Check conditions */
5718 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5719 else
5720 ret = FAILED;
5721 } else if (status == IOCB_TIMEDOUT) {
5722 ret = TIMEOUT_ERROR;
5723 } else {
5724 ret = FAILED;
5726 } else
5727 ret = SUCCESS;
5729 lpfc_sli_release_iocbq(phba, iocbqrsp);
5731 if (ret != TIMEOUT_ERROR)
5732 lpfc_release_scsi_buf(phba, lpfc_cmd);
5734 return ret;
5735 }
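/*
 * Illustrative sketch, not compiled into the driver: how the (iocb status,
 * ulpStatus) pair above maps onto the routine's return value.
 * tmf_outcome() is a hypothetical restatement of the branch logic above.
 */
#if 0
static int tmf_outcome(struct lpfc_vport *vport, struct lpfc_io_buf *cmd,
		       int status, uint32_t ulp_status)
{
	if (status == IOCB_TIMEDOUT)
		return TIMEOUT_ERROR;
	if (status != IOCB_SUCCESS)
		return FAILED;
	if (ulp_status == IOSTAT_SUCCESS)
		return SUCCESS;
	if (ulp_status == IOSTAT_FCP_RSP_ERROR)
		/* transport OK but the FCP_RSP flagged a problem: parse it */
		return lpfc_check_fcp_rsp(vport, cmd);
	return FAILED;
}
#endif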
5738 * lpfc_chk_tgt_mapped - Verify that the scsi target (rport) is present and mapped
5739 * @vport: The virtual port to check on
5740 * @cmnd: Pointer to scsi_cmnd data structure.
5742 * This routine delays until the scsi target (aka rport) for the
5743 * command exists (is present and logged in) or we declare it non-existent.
5745 * Return code :
5746 * 0x2003 - Error
5747 * 0x2002 - Success
5749 static int
5750 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5752 struct lpfc_rport_data *rdata;
5753 struct lpfc_nodelist *pnode;
5754 unsigned long later;
5756 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5757 if (!rdata) {
5758 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5759 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5760 return FAILED;
5762 pnode = rdata->pnode;
5763 /*
5764 * If target is not in a MAPPED state, delay until
5765 * target is rediscovered or devloss timeout expires.
5766 */
5767 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5768 while (time_after(later, jiffies)) {
5769 if (!pnode)
5770 return FAILED;
5771 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5772 return SUCCESS;
5773 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5774 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5775 if (!rdata)
5776 return FAILED;
5777 pnode = rdata->pnode;
5778 }
5779 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5780 return FAILED;
5781 return SUCCESS;
5782 }
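/*
 * Illustrative sketch, not compiled into the driver: the bounded-poll
 * pattern used above. poll_mapped() and its is_mapped() callback are
 * hypothetical; the bound of twice devloss_tmo and the 500 ms step match
 * the loop above.
 */
#if 0
static bool poll_mapped(bool (*is_mapped)(void *), void *arg,
			uint32_t devloss_tmo_secs)
{
	unsigned long later;

	later = jiffies + msecs_to_jiffies(2 * devloss_tmo_secs * 1000);
	while (time_after(later, jiffies)) {
		if (is_mapped(arg))
			return true;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	return is_mapped(arg);	/* one final check before giving up */
}
#endif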
5785 * lpfc_reset_flush_io_context - Flush orphaned I/O contexts after a reset TMF
5786 * @vport: The virtual port (scsi_host) for the flush context
5787 * @tgt_id: If aborting by Target context - specifies the target id
5788 * @lun_id: If aborting by Lun context - specifies the lun id
5789 * @context: specifies the context level to flush at.
5791 * After a reset condition via TMF, we need to flush orphaned i/o
5792 * contexts from the adapter. This routine aborts any contexts
5793 * outstanding, then waits for their completions. The wait is
5794 * bounded by devloss_tmo though.
5796 * Return code :
5797 * 0x2003 - Error
5798 * 0x2002 - Success
5800 static int
5801 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5802 uint64_t lun_id, lpfc_ctx_cmd context)
5804 struct lpfc_hba *phba = vport->phba;
5805 unsigned long later;
5806 int cnt;
5808 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5809 if (cnt)
5810 lpfc_sli_abort_taskmgmt(vport,
5811 &phba->sli.sli3_ring[LPFC_FCP_RING],
5812 tgt_id, lun_id, context);
5813 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5814 while (time_after(later, jiffies) && cnt) {
5815 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5816 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5817 }
5818 if (cnt) {
5819 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5820 "0724 I/O flush failure for context %s : cnt x%x\n",
5821 ((context == LPFC_CTX_LUN) ? "LUN" :
5822 ((context == LPFC_CTX_TGT) ? "TGT" :
5823 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5824 cnt);
5825 return FAILED;
5826 }
5827 return SUCCESS;
5828 }
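/*
 * Illustrative sketch, not compiled into the driver: the abort-and-drain
 * shape of the routine above. count() and abort_all() are hypothetical
 * callbacks standing in for lpfc_sli_sum_iocb() and
 * lpfc_sli_abort_taskmgmt(); the 20 ms poll step and the 2 * devloss_tmo
 * bound match the code above.
 */
#if 0
static int drain_outstanding(int (*count)(void *), void (*abort_all)(void *),
			     void *ctx, uint32_t devloss_tmo_secs)
{
	unsigned long later;
	int cnt = count(ctx);

	if (cnt)
		abort_all(ctx);		/* kick off aborts for what remains */
	later = jiffies + msecs_to_jiffies(2 * devloss_tmo_secs * 1000);
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = count(ctx);
	}
	return cnt ? FAILED : SUCCESS;
}
#endif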
5831 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5832 * @cmnd: Pointer to scsi_cmnd data structure.
5834 * This routine does a device reset by sending a LUN_RESET task management
5835 * command.
5837 * Return code :
5838 * 0x2003 - Error
5839 * 0x2002 - Success
5841 static int
5842 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5844 struct Scsi_Host *shost = cmnd->device->host;
5845 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5846 struct lpfc_rport_data *rdata;
5847 struct lpfc_nodelist *pnode;
5848 unsigned tgt_id = cmnd->device->id;
5849 uint64_t lun_id = cmnd->device->lun;
5850 struct lpfc_scsi_event_header scsi_event;
5851 int status;
5853 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5854 if (!rdata || !rdata->pnode) {
5855 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5856 "0798 Device Reset rdata failure: rdata x%px\n",
5857 rdata);
5858 return FAILED;
5860 pnode = rdata->pnode;
5861 status = fc_block_scsi_eh(cmnd);
5862 if (status != 0 && status != SUCCESS)
5863 return status;
5865 status = lpfc_chk_tgt_mapped(vport, cmnd);
5866 if (status == FAILED) {
5867 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5868 "0721 Device Reset rport failure: rdata x%px\n", rdata);
5869 return FAILED;
5872 scsi_event.event_type = FC_REG_SCSI_EVENT;
5873 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5874 scsi_event.lun = lun_id;
5875 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5876 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5878 fc_host_post_vendor_event(shost, fc_get_event_number(),
5879 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5881 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5882 FCP_LUN_RESET);
5884 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5885 "0713 SCSI layer issued Device Reset (%d, %llu) "
5886 "return x%x\n", tgt_id, lun_id, status);
5888 /*
5889 * We have to clean up the I/O, as it may have been orphaned by the
5890 * TMF; or, if the TMF failed, it may be in an indeterminate state.
5891 * So, continue on.
5892 * We will report success if all the I/O aborts successfully.
5893 */
5894 if (status == SUCCESS)
5895 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5896 LPFC_CTX_LUN);
5898 return status;
5899 }
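/*
 * Illustrative sketch, not compiled into the driver: the shape shared by
 * the LUN reset handler above and the target reset handler below, with the
 * vendor-event posting omitted. eh_reset_common() is hypothetical; the
 * calls inside it are the real routines used by both handlers.
 */
#if 0
static int eh_reset_common(struct scsi_cmnd *cmnd, uint8_t tmf,
			   lpfc_ctx_cmd ctx)
{
	struct lpfc_vport *vport =
		(struct lpfc_vport *)cmnd->device->host->hostdata;
	unsigned int tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	int status;

	status = fc_block_scsi_eh(cmnd);	/* wait out a blocked rport */
	if (status != 0 && status != SUCCESS)
		return status;
	if (lpfc_chk_tgt_mapped(vport, cmnd) == FAILED)
		return FAILED;
	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, tmf);
	/* only report success once the orphaned I/O context is drained */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, ctx);
	return status;
}
#endif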
5902 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5903 * @cmnd: Pointer to scsi_cmnd data structure.
5905 * This routine does a target reset by sending a TARGET_RESET task management
5906 * command.
5908 * Return code :
5909 * 0x2003 - Error
5910 * 0x2002 - Success
5912 static int
5913 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5915 struct Scsi_Host *shost = cmnd->device->host;
5916 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5917 struct lpfc_rport_data *rdata;
5918 struct lpfc_nodelist *pnode;
5919 unsigned tgt_id = cmnd->device->id;
5920 uint64_t lun_id = cmnd->device->lun;
5921 struct lpfc_scsi_event_header scsi_event;
5922 int status;
5924 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5925 if (!rdata || !rdata->pnode) {
5926 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5927 "0799 Target Reset rdata failure: rdata x%px\n",
5928 rdata);
5929 return FAILED;
5931 pnode = rdata->pnode;
5932 status = fc_block_scsi_eh(cmnd);
5933 if (status != 0 && status != SUCCESS)
5934 return status;
5936 status = lpfc_chk_tgt_mapped(vport, cmnd);
5937 if (status == FAILED) {
5938 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5939 "0722 Target Reset rport failure: rdata x%px\n", rdata);
5940 if (pnode) {
5941 spin_lock_irq(&pnode->lock);
5942 pnode->nlp_flag &= ~NLP_NPR_ADISC;
5943 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5944 spin_unlock_irq(&pnode->lock);
5946 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5947 LPFC_CTX_TGT);
5948 return FAST_IO_FAIL;
5951 scsi_event.event_type = FC_REG_SCSI_EVENT;
5952 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5953 scsi_event.lun = 0;
5954 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5955 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5957 fc_host_post_vendor_event(shost, fc_get_event_number(),
5958 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5960 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5961 FCP_TARGET_RESET);
5963 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5964 "0723 SCSI layer issued Target Reset (%d, %llu) "
5965 "return x%x\n", tgt_id, lun_id, status);
5967 /*
5968 * We have to clean up the I/O, as it may have been orphaned by the
5969 * TMF; or, if the TMF failed, it may be in an indeterminate state.
5970 * So, continue on.
5971 * We will report success if all the I/O aborts successfully.
5972 */
5973 if (status == SUCCESS)
5974 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5975 LPFC_CTX_TGT);
5976 return status;
5980 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5981 * @cmnd: Pointer to scsi_cmnd data structure.
5983 * This routine does a target reset to all targets on @cmnd->device->host.
5984 * This emulates Parallel SCSI Bus Reset Semantics.
5986 * Return code :
5987 * 0x2003 - Error
5988 * 0x2002 - Success
5990 static int
5991 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5993 struct Scsi_Host *shost = cmnd->device->host;
5994 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5995 struct lpfc_nodelist *ndlp = NULL;
5996 struct lpfc_scsi_event_header scsi_event;
5997 int match;
5998 int ret = SUCCESS, status, i;
6000 scsi_event.event_type = FC_REG_SCSI_EVENT;
6001 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
6002 scsi_event.lun = 0;
6003 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
6004 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
6006 fc_host_post_vendor_event(shost, fc_get_event_number(),
6007 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6009 status = fc_block_scsi_eh(cmnd);
6010 if (status != 0 && status != SUCCESS)
6011 return status;
6013 /*
6014 * Since the driver manages a single bus device, reset all
6015 * targets known to the driver. Should any target reset
6016 * fail, this routine returns failure to the midlayer.
6017 */
6018 for (i = 0; i < LPFC_MAX_TARGET; i++) {
6019 /* Search for mapped node by target ID */
6020 match = 0;
6021 spin_lock_irq(shost->host_lock);
6022 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6024 if (vport->phba->cfg_fcp2_no_tgt_reset &&
6025 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
6026 continue;
6027 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
6028 ndlp->nlp_sid == i &&
6029 ndlp->rport &&
6030 ndlp->nlp_type & NLP_FCP_TARGET) {
6031 match = 1;
6032 break;
6033 }
6034 }
6035 spin_unlock_irq(shost->host_lock);
6036 if (!match)
6037 continue;
6039 status = lpfc_send_taskmgmt(vport, cmnd,
6040 i, 0, FCP_TARGET_RESET);
6042 if (status != SUCCESS) {
6043 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6044 "0700 Bus Reset on target %d failed\n",
6046 ret = FAILED;
6050 * We have to clean up i/o as : they may be orphaned by the TMFs
6051 * above; or if any of the TMFs failed, they may be in an
6052 * indeterminate state.
6053 * We will report success if all the i/o aborts successfully.
6056 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
6057 if (status != SUCCESS)
6058 ret = FAILED;
6060 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6061 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
6062 return ret;
6063 }
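/*
 * Illustrative sketch, not compiled into the driver: the per-node match
 * test from the bus reset loop above, pulled out as a predicate.
 * bus_reset_target_match() is hypothetical; the fields and flags are the
 * ones the loop tests under the host lock.
 */
#if 0
static bool bus_reset_target_match(struct lpfc_nodelist *ndlp, int sid,
				   bool skip_fcp2)
{
	/* optionally leave FCP-2 (e.g. tape-class) devices alone */
	if (skip_fcp2 && (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
		return false;
	return ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
	       ndlp->nlp_sid == sid &&
	       ndlp->rport &&
	       (ndlp->nlp_type & NLP_FCP_TARGET);
}
#endif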
6066 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6067 * @cmnd: Pointer to scsi_cmnd data structure.
6069 * This routine does a host reset of the adapter port. It brings the HBA
6070 * offline, performs a board restart, and then brings the board back online.
6071 * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
6072 * all outstanding SCSI commands to the host, returning the errors to the
6073 * SCSI mid-layer. As this is the SCSI mid-layer's last resort for error
6074 * handling, it only returns an error if resetting the adapter
6075 * is unsuccessful; in all other cases, it returns success.
6077 * Return code :
6078 * 0x2003 - Error
6079 * 0x2002 - Success
6081 static int
6082 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6084 struct Scsi_Host *shost = cmnd->device->host;
6085 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6086 struct lpfc_hba *phba = vport->phba;
6087 int rc, ret = SUCCESS;
6089 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6090 "3172 SCSI layer issued Host Reset Data:\n");
6092 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6093 lpfc_offline(phba);
6094 rc = lpfc_sli_brdrestart(phba);
6095 if (rc)
6096 goto error;
6098 rc = lpfc_online(phba);
6099 if (rc)
6100 goto error;
6102 lpfc_unblock_mgmt_io(phba);
6104 return ret;
6105 error:
6106 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6107 "3323 Failed host reset\n");
6108 lpfc_unblock_mgmt_io(phba);
6109 return FAILED;
6113 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6114 * @sdev: Pointer to scsi_device.
6116 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
6117 * globally available list of scsi buffers. It also ensures that scsi
6118 * buffers are not allocated beyond the HBA limit conveyed to the midlayer.
6119 * This list of scsi buffers exists for the lifetime of the driver.
6121 * Return codes:
6122 * non-0 - Error
6123 * 0 - Success
6125 static int
6126 lpfc_slave_alloc(struct scsi_device *sdev)
6128 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6129 struct lpfc_hba *phba = vport->phba;
6130 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6131 uint32_t total = 0;
6132 uint32_t num_to_alloc = 0;
6133 int num_allocated = 0;
6134 uint32_t sdev_cnt;
6135 struct lpfc_device_data *device_data;
6136 unsigned long flags;
6137 struct lpfc_name target_wwpn;
6139 if (!rport || fc_remote_port_chkready(rport))
6140 return -ENXIO;
6142 if (phba->cfg_fof) {
6144 /*
6145 * Check to see if the device data structure for the lun
6146 * exists. If not, create one.
6147 */
6149 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6150 spin_lock_irqsave(&phba->devicelock, flags);
6151 device_data = __lpfc_get_device_data(phba,
6152 &phba->luns,
6153 &vport->fc_portname,
6154 &target_wwpn,
6155 sdev->lun);
6156 if (!device_data) {
6157 spin_unlock_irqrestore(&phba->devicelock, flags);
6158 device_data = lpfc_create_device_data(phba,
6159 &vport->fc_portname,
6160 &target_wwpn,
6161 sdev->lun,
6162 phba->cfg_XLanePriority,
6163 true);
6164 if (!device_data)
6165 return -ENOMEM;
6166 spin_lock_irqsave(&phba->devicelock, flags);
6167 list_add_tail(&device_data->listentry, &phba->luns);
6169 device_data->rport_data = rport->dd_data;
6170 device_data->available = true;
6171 spin_unlock_irqrestore(&phba->devicelock, flags);
6172 sdev->hostdata = device_data;
6173 } else {
6174 sdev->hostdata = rport->dd_data;
6176 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6178 /* For SLI4, all IO buffers are pre-allocated */
6179 if (phba->sli_rev == LPFC_SLI_REV4)
6180 return 0;
6182 /* This code path is now ONLY for SLI3 adapters */
6184 /*
6185 * Populate the cmds_per_lun count scsi_bufs into this host's globally
6186 * available list of scsi buffers. Don't allocate more than the
6187 * HBA limit conveyed to the midlayer via the host structure. The
6188 * formula accounts for the lun_queue_depth + error handlers + 1
6189 * extra. This list of scsi bufs exists for the lifetime of the driver.
6190 */
6191 total = phba->total_scsi_bufs;
6192 num_to_alloc = vport->cfg_lun_queue_depth + 2;
6194 /* If allocated buffers are enough do nothing */
6195 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6196 return 0;
6198 /* Allow some exchanges to be available always to complete discovery */
6199 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6200 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6201 "0704 At limitation of %d preallocated "
6202 "command buffers\n", total);
6203 return 0;
6204 /* Allow some exchanges to be available always to complete discovery */
6205 } else if (total + num_to_alloc >
6206 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6207 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6208 "0705 Allocation request of %d "
6209 "command buffers will exceed max of %d. "
6210 "Reducing allocation request to %d.\n",
6211 num_to_alloc, phba->cfg_hba_queue_depth,
6212 (phba->cfg_hba_queue_depth - total));
6213 num_to_alloc = phba->cfg_hba_queue_depth - total;
6215 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6216 if (num_to_alloc != num_allocated) {
6217 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6218 "0708 Allocation request of %d "
6219 "command buffers did not succeed. "
6220 "Allocated %d buffers.\n",
6221 num_to_alloc, num_allocated);
6222 }
6223 if (num_allocated > 0)
6224 phba->total_scsi_bufs += num_allocated;
6225 return 0;
6226 }
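/*
 * Worked example for the SLI3 budget above, with hypothetical numbers:
 * cfg_lun_queue_depth = 30 makes each new LUN request 30 + 2 = 32 buffers.
 * With cfg_hba_queue_depth = 8192, a discovery reserve of 20, and
 * total = 8170 already allocated, 8170 + 32 would cross the reserve, so
 * the request is clipped to 8192 - 8170 = 22. clip_alloc_request() is a
 * hypothetical restatement of that clipping, not driver code.
 */
#if 0
static uint32_t clip_alloc_request(uint32_t total, uint32_t want,
				   uint32_t hba_qd, uint32_t disc_reserve)
{
	if (total >= hba_qd - disc_reserve)
		return 0;		/* already at the prealloc limit */
	if (total + want > hba_qd - disc_reserve)
		return hba_qd - total;	/* clip, keeping discovery IOCBs */
	return want;			/* plenty of headroom: take it all */
}
#endif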
6229 * lpfc_slave_configure - scsi_host_template slave_configure entry point
6230 * @sdev: Pointer to scsi_device.
6232 * This routine configures the following items:
6233 * - Tag command queuing support for @sdev if supported.
6234 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6236 * Return codes:
6237 * 0 - Success
6239 static int
6240 lpfc_slave_configure(struct scsi_device *sdev)
6242 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6243 struct lpfc_hba *phba = vport->phba;
6245 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6247 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6248 lpfc_sli_handle_fast_ring_event(phba,
6249 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6250 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6251 lpfc_poll_rearm_timer(phba);
6254 return 0;
6258 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6259 * @sdev: Pointer to scsi_device.
6261 * This routine sets the @sdev hostdata field to NULL.
6263 static void
6264 lpfc_slave_destroy(struct scsi_device *sdev)
6266 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6267 struct lpfc_hba *phba = vport->phba;
6268 unsigned long flags;
6269 struct lpfc_device_data *device_data = sdev->hostdata;
6271 atomic_dec(&phba->sdev_cnt);
6272 if ((phba->cfg_fof) && (device_data)) {
6273 spin_lock_irqsave(&phba->devicelock, flags);
6274 device_data->available = false;
6275 if (!device_data->oas_enabled)
6276 lpfc_delete_device_data(phba, device_data);
6277 spin_unlock_irqrestore(&phba->devicelock, flags);
6279 sdev->hostdata = NULL;
6280 return;
6284 * lpfc_create_device_data - creates and initializes device data structure for OAS
6285 * @phba: Pointer to host bus adapter structure.
6286 * @vport_wwpn: Pointer to vport's wwpn information
6287 * @target_wwpn: Pointer to target's wwpn information
6288 * @lun: Lun on target
6289 * @pri: Priority
6290 * @atomic_create: Flag to indicate if memory should be allocated using the
6291 * GFP_ATOMIC flag or not.
6293 * This routine creates a device data structure which will contain identifying
6294 * information for the device (host wwpn, target wwpn, lun), the state of OAS,
6295 * whether or not the corresponding lun is available to the system,
6296 * and a pointer to the rport data.
6298 * Return codes:
6299 * NULL - Error
6300 * Pointer to lpfc_device_data - Success
6302 struct lpfc_device_data*
6303 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6304 struct lpfc_name *target_wwpn, uint64_t lun,
6305 uint32_t pri, bool atomic_create)
6308 struct lpfc_device_data *lun_info;
6309 int memory_flags;
6311 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6312 !(phba->cfg_fof))
6313 return NULL;
6315 /* Attempt to create the device data to contain lun info */
6317 if (atomic_create)
6318 memory_flags = GFP_ATOMIC;
6319 else
6320 memory_flags = GFP_KERNEL;
6321 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6322 if (!lun_info)
6323 return NULL;
6324 INIT_LIST_HEAD(&lun_info->listentry);
6325 lun_info->rport_data = NULL;
6326 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6327 sizeof(struct lpfc_name));
6328 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6329 sizeof(struct lpfc_name));
6330 lun_info->device_id.lun = lun;
6331 lun_info->oas_enabled = false;
6332 lun_info->priority = pri;
6333 lun_info->available = false;
6334 return lun_info;
6338 * lpfc_delete_device_data - frees a device data structure for OAS
6339 * @phba: Pointer to host bus adapter structure.
6340 * @lun_info: Pointer to device data structure to free.
6342 * This routine frees the previously allocated device data structure passed.
6345 void
6346 lpfc_delete_device_data(struct lpfc_hba *phba,
6347 struct lpfc_device_data *lun_info)
6350 if (unlikely(!phba) || !lun_info ||
6351 !(phba->cfg_fof))
6352 return;
6354 if (!list_empty(&lun_info->listentry))
6355 list_del(&lun_info->listentry);
6356 mempool_free(lun_info, phba->device_data_mem_pool);
6357 return;
6361 * __lpfc_get_device_data - returns the device data for the specified lun
6362 * @phba: Pointer to host bus adapter structure.
6363 * @list: Point to list to search.
6364 * @vport_wwpn: Pointer to vport's wwpn information
6365 * @target_wwpn: Pointer to target's wwpn information
6366 * @lun: Lun on target
6368 * This routine searches the list passed for the specified lun's device data.
6369 * This function does not hold locks, it is the responsibility of the caller
6370 * to ensure the proper lock is held before calling the function.
6372 * Return codes:
6373 * NULL - Error
6374 * Pointer to lpfc_device_data - Success
6376 struct lpfc_device_data*
6377 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6378 struct lpfc_name *vport_wwpn,
6379 struct lpfc_name *target_wwpn, uint64_t lun)
6382 struct lpfc_device_data *lun_info;
6384 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6385 !phba->cfg_fof)
6386 return NULL;
6388 /* Check to see if the lun is already enabled for OAS. */
6390 list_for_each_entry(lun_info, list, listentry) {
6391 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6392 sizeof(struct lpfc_name)) == 0) &&
6393 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6394 sizeof(struct lpfc_name)) == 0) &&
6395 (lun_info->device_id.lun == lun))
6396 return lun_info;
6397 }
6399 return NULL;
6400 }
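/*
 * Illustrative sketch, not compiled into the driver: the three-part key
 * comparison the search above performs per list entry. device_id_equal()
 * is a hypothetical helper over the same struct lpfc_device_id fields.
 */
#if 0
static bool device_id_equal(const struct lpfc_device_id *id,
			    const struct lpfc_name *vport_wwpn,
			    const struct lpfc_name *target_wwpn,
			    uint64_t lun)
{
	return !memcmp(&id->vport_wwpn, vport_wwpn, sizeof(*vport_wwpn)) &&
	       !memcmp(&id->target_wwpn, target_wwpn, sizeof(*target_wwpn)) &&
	       id->lun == lun;
}
#endif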
6403 * lpfc_find_next_oas_lun - searches for the next oas lun
6404 * @phba: Pointer to host bus adapter structure.
6405 * @vport_wwpn: Pointer to vport's wwpn information
6406 * @target_wwpn: Pointer to target's wwpn information
6407 * @starting_lun: Pointer to the lun to start searching for
6408 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6409 * @found_target_wwpn: Pointer to the found lun's target wwpn information
6410 * @found_lun: Pointer to the found lun.
6411 * @found_lun_status: Pointer to status of the found lun.
6412 * @found_lun_pri: Pointer to priority of the found lun.
6414 * This routine searches the luns list for the specified lun
6415 * or the first lun for the vport/target. If the vport wwpn contains
6416 * a zero value then a specific vport is not specified. In this case
6417 * any vport which contains the lun will be considered a match. If the
6418 * target wwpn contains a zero value then a specific target is not specified.
6419 * In this case any target which contains the lun will be considered a
6420 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
6421 * are returned. The function will also return the next lun if available.
6422 * If the next lun is not found, the starting_lun parameter will be set to
6423 * NO_MORE_OAS_LUN.
6425 * Return codes:
6426 * true - lun found
6427 * false - lun not found
6429 bool
6430 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6431 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6432 struct lpfc_name *found_vport_wwpn,
6433 struct lpfc_name *found_target_wwpn,
6434 uint64_t *found_lun,
6435 uint32_t *found_lun_status,
6436 uint32_t *found_lun_pri)
6439 unsigned long flags;
6440 struct lpfc_device_data *lun_info;
6441 struct lpfc_device_id *device_id;
6442 uint64_t lun;
6443 bool found = false;
6445 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6446 !starting_lun || !found_vport_wwpn ||
6447 !found_target_wwpn || !found_lun || !found_lun_status ||
6448 (*starting_lun == NO_MORE_OAS_LUN) ||
6449 !phba->cfg_fof)
6450 return false;
6452 lun = *starting_lun;
6453 *found_lun = NO_MORE_OAS_LUN;
6454 *starting_lun = NO_MORE_OAS_LUN;
6456 /* Search for the lun, or the lun closest in value */
6458 spin_lock_irqsave(&phba->devicelock, flags);
6459 list_for_each_entry(lun_info, &phba->luns, listentry) {
6460 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6461 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6462 sizeof(struct lpfc_name)) == 0)) &&
6463 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6464 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6465 sizeof(struct lpfc_name)) == 0)) &&
6466 (lun_info->oas_enabled)) {
6467 device_id = &lun_info->device_id;
6468 if ((!found) &&
6469 ((lun == FIND_FIRST_OAS_LUN) ||
6470 (device_id->lun == lun))) {
6471 *found_lun = device_id->lun;
6472 memcpy(found_vport_wwpn,
6473 &device_id->vport_wwpn,
6474 sizeof(struct lpfc_name));
6475 memcpy(found_target_wwpn,
6476 &device_id->target_wwpn,
6477 sizeof(struct lpfc_name));
6478 if (lun_info->available)
6479 *found_lun_status =
6480 OAS_LUN_STATUS_EXISTS;
6481 else
6482 *found_lun_status = 0;
6483 *found_lun_pri = lun_info->priority;
6484 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6485 memset(vport_wwpn, 0x0,
6486 sizeof(struct lpfc_name));
6487 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6488 memset(target_wwpn, 0x0,
6489 sizeof(struct lpfc_name));
6490 found = true;
6491 } else if (found) {
6492 *starting_lun = device_id->lun;
6493 memcpy(vport_wwpn, &device_id->vport_wwpn,
6494 sizeof(struct lpfc_name));
6495 memcpy(target_wwpn, &device_id->target_wwpn,
6496 sizeof(struct lpfc_name));
6497 break;
6498 }
6499 }
6500 }
6501 spin_unlock_irqrestore(&phba->devicelock, flags);
6502 return found;
6503 }
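/*
 * Illustrative sketch, not compiled into the driver: the wildcard rule the
 * search above applies to both wwpns. wwpn_matches() is a hypothetical
 * helper; an all-zero wwpn means "match any vport/target".
 */
#if 0
static bool wwpn_matches(struct lpfc_name *want, struct lpfc_name *have)
{
	return wwn_to_u64(want->u.wwn) == 0 ||
	       !memcmp(want, have, sizeof(struct lpfc_name));
}
#endif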
6506 * lpfc_enable_oas_lun - enables a lun for OAS operations
6507 * @phba: Pointer to host bus adapter structure.
6508 * @vport_wwpn: Pointer to vport's wwpn information
6509 * @target_wwpn: Pointer to target's wwpn information
6510 * @lun: Lun
6511 * @pri: Priority
6513 * This routine enables a lun for OAS operations. The routine does so by
6514 * doing the following:
6516 * 1) Checks to see if the device data for the lun has been created.
6517 * 2) If found, sets the OAS enabled flag if not set and returns.
6518 * 3) Otherwise, creates a device data structure.
6519 * 4) If successfully created, indicates the device data is for an OAS lun,
6520 * indicates the lun is not available, and adds it to the list of luns.
6522 * Return codes:
6523 * false - Error
6524 * true - Success
6526 bool
6527 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6528 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6531 struct lpfc_device_data *lun_info;
6532 unsigned long flags;
6534 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6535 !phba->cfg_fof)
6536 return false;
6538 spin_lock_irqsave(&phba->devicelock, flags);
6540 /* Check to see if the device data for the lun has been created */
6541 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6542 target_wwpn, lun);
6543 if (lun_info) {
6544 if (!lun_info->oas_enabled)
6545 lun_info->oas_enabled = true;
6546 lun_info->priority = pri;
6547 spin_unlock_irqrestore(&phba->devicelock, flags);
6548 return true;
6549 }
6551 /* Create a lun info structure and add it to the list of luns */
6552 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6553 pri, true);
6554 if (lun_info) {
6555 lun_info->oas_enabled = true;
6556 lun_info->priority = pri;
6557 lun_info->available = false;
6558 list_add_tail(&lun_info->listentry, &phba->luns);
6559 spin_unlock_irqrestore(&phba->devicelock, flags);
6560 return true;
6561 }
6562 spin_unlock_irqrestore(&phba->devicelock, flags);
6563 return false;
6564 }
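/*
 * Hypothetical usage sketch, not compiled into the driver: enable OAS on a
 * lun and later disable it, e.g. from a management path. The function name,
 * lun number 5, and priority value are placeholders.
 */
#if 0
static int example_oas_toggle(struct lpfc_hba *phba,
			      struct lpfc_name *vport_wwpn,
			      struct lpfc_name *target_wwpn)
{
	uint8_t pri = 1;	/* placeholder priority */

	if (!lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, 5, pri))
		return -ENOMEM;	/* allocation failed or OAS not enabled */
	/* ... I/O to lun 5 now gets OAS treatment ... */
	lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, 5, pri);
	return 0;
}
#endif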
6567 * lpfc_disable_oas_lun - disables a lun for OAS operations
6568 * @phba: Pointer to host bus adapter structure.
6569 * @vport_wwpn: Pointer to vport's wwpn information
6570 * @target_wwpn: Pointer to target's wwpn information
6571 * @lun: Lun
6572 * @pri: Priority
6574 * This routine disables a lun for OAS operations. The routine does so by
6575 * doing the following:
6577 * 1) Checks to see if the device data for the lun is created.
6578 * 2) If present, clears the flag indicating this lun is for OAS.
6579 * 3) If the lun is not available to the system, the device data is
6580 * freed.
6582 * Return codes:
6583 * false - Error
6584 * true - Success
6586 bool
6587 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6588 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6591 struct lpfc_device_data *lun_info;
6592 unsigned long flags;
6594 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6595 !phba->cfg_fof)
6596 return false;
6598 spin_lock_irqsave(&phba->devicelock, flags);
6600 /* Check to see if the lun is available. */
6601 lun_info = __lpfc_get_device_data(phba,
6602 &phba->luns, vport_wwpn,
6603 target_wwpn, lun);
6604 if (lun_info) {
6605 lun_info->oas_enabled = false;
6606 lun_info->priority = pri;
6607 if (!lun_info->available)
6608 lpfc_delete_device_data(phba, lun_info);
6609 spin_unlock_irqrestore(&phba->devicelock, flags);
6610 return true;
6613 spin_unlock_irqrestore(&phba->devicelock, flags);
6614 return false;
6617 static int
6618 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
6620 return SCSI_MLQUEUE_HOST_BUSY;
6623 static int
6624 lpfc_no_handler(struct scsi_cmnd *cmnd)
6626 return FAILED;
6629 static int
6630 lpfc_no_slave(struct scsi_device *sdev)
6632 return -ENODEV;
6635 struct scsi_host_template lpfc_template_nvme = {
6636 .module = THIS_MODULE,
6637 .name = LPFC_DRIVER_NAME,
6638 .proc_name = LPFC_DRIVER_NAME,
6639 .info = lpfc_info,
6640 .queuecommand = lpfc_no_command,
6641 .eh_abort_handler = lpfc_no_handler,
6642 .eh_device_reset_handler = lpfc_no_handler,
6643 .eh_target_reset_handler = lpfc_no_handler,
6644 .eh_bus_reset_handler = lpfc_no_handler,
6645 .eh_host_reset_handler = lpfc_no_handler,
6646 .slave_alloc = lpfc_no_slave,
6647 .slave_configure = lpfc_no_slave,
6648 .scan_finished = lpfc_scan_finished,
6649 .this_id = -1,
6650 .sg_tablesize = 1,
6651 .cmd_per_lun = 1,
6652 .shost_attrs = lpfc_hba_attrs,
6653 .max_sectors = 0xFFFFFFFF,
6654 .vendor_id = LPFC_NL_VENDOR_ID,
6655 .track_queue_depth = 0,
6658 struct scsi_host_template lpfc_template = {
6659 .module = THIS_MODULE,
6660 .name = LPFC_DRIVER_NAME,
6661 .proc_name = LPFC_DRIVER_NAME,
6662 .info = lpfc_info,
6663 .queuecommand = lpfc_queuecommand,
6664 .eh_timed_out = fc_eh_timed_out,
6665 .eh_abort_handler = lpfc_abort_handler,
6666 .eh_device_reset_handler = lpfc_device_reset_handler,
6667 .eh_target_reset_handler = lpfc_target_reset_handler,
6668 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6669 .eh_host_reset_handler = lpfc_host_reset_handler,
6670 .slave_alloc = lpfc_slave_alloc,
6671 .slave_configure = lpfc_slave_configure,
6672 .slave_destroy = lpfc_slave_destroy,
6673 .scan_finished = lpfc_scan_finished,
6674 .this_id = -1,
6675 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6676 .cmd_per_lun = LPFC_CMD_PER_LUN,
6677 .shost_attrs = lpfc_hba_attrs,
6678 .max_sectors = 0xFFFFFFFF,
6679 .vendor_id = LPFC_NL_VENDOR_ID,
6680 .change_queue_depth = scsi_change_queue_depth,
6681 .track_queue_depth = 1,