/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

static union lpfc_wqe128 lpfc_iread_cmd_template;
static union lpfc_wqe128 lpfc_iwrite_cmd_template;
static union lpfc_wqe128 lpfc_icmnd_cmd_template;
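
/*
 * The three command templates above hold the WQE words that do not change
 * from IO to IO (words 7-11 for IREAD/IWRITE, words 4-11 for ICMND).  They
 * are filled in once by lpfc_nvme_cmd_template() and copied into each
 * command WQE by lpfc_nvme_prep_io_cmd() at submit time.
 */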
/* Setup WQE templates for NVME IOs */
static void
lpfc_nvme_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */
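
	/*
	 * Note: wqe_wqes=1/wqe_dbde=0 assumes the NVME CMD IU is embedded in
	 * the WQE (the cfg_nvme_embed_cmd default); lpfc_nvme_adj_fcp_sgls()
	 * flips these bits when the CMD IU is referenced by BDE instead.
	 */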
	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
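
/*
 * Only the words marked "variable" above are filled in per IO (by
 * lpfc_nvme_prep_io_cmd(), lpfc_nvme_prep_io_dma() and
 * lpfc_nvme_adj_fcp_sgls()); everything else comes from these templates,
 * which are expected to be set up once during driver initialization.
 */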
/**
 * lpfc_nvme_create_queue - Allocate and bind a qhandle for an NVME IO queue
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "hdw_queue %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}
/**
 * lpfc_nvme_delete_queue - Free a previously allocated qhandle
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport %p delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}
/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete of remoteport %p\n",
			remoteport);
	spin_lock_irq(&vport->phba->hbalock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
		ndlp->nrport = NULL;
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
	}
	spin_unlock_irq(&vport->phba->hbalock);

	/* Remove original register reference. The host transport
	 * won't reference this rport/remoteport any further.
	 */
	lpfc_nlp_put(ndlp);

 rport_err:
	return;
}
static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
			 "lsreg:%p bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}
/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	/* Need the ndlp.  It is stored in the driver's rport. */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 Remoteport %p, rport has invalid ndlp. "
				 "Failing LS Req\n", pnvme_rport);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
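
	/*
	 * At this point bmp->virt holds a two-entry BPL: BDE 0 describes the
	 * LS request payload and BDE 1 the response buffer.  Both data
	 * buffers are owned by the nvme-fc transport; only the bmp wrapper
	 * is freed by this driver.
	 */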
	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID,
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	atomic_inc(&lport->fc4NvmeLsRequests);

	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
	 * This code allows it all to work.
	 */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		atomic_inc(&lport->xmt_ls_err);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq%p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	/* Stub in routine and return 0 for now. */
	return ret;
}
/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine to abort a link service request
 * previously issued to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;
	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		atomic_inc(&lport->xmt_ls_abort);
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}
/* Fix up the existing sgls for NVME IO. */
static void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes.  Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;		/* Word 1 */
		*wptr++ = *dptr++;		/* Word 2 */
		*wptr++ = *dptr++;		/* Word 3 */
		*wptr++ = *dptr++;		/* Word 4 */
		dptr++;				/* Skip Word 5 in payload */
		*wptr++ = *dptr++;		/* Word 6 */
		*wptr++ = *dptr++;		/* Word 7 */
		dptr += 8;			/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;		/* Word 16 */
		*wptr++ = *dptr++;		/* Word 17 */
		*wptr++ = *dptr++;		/* Word 18 */
		*wptr++ = *dptr++;		/* Word 19 */
		*wptr++ = *dptr++;		/* Word 20 */
		*wptr++ = *dptr++;		/* Word 21 */
		*wptr++ = *dptr++;		/* Word 22 */
		*wptr   = *dptr;		/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;
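
	/*
	 * The response SGE is marked as the last SGE only when the command
	 * carries no data (sg_cnt == 0); otherwise the data SGEs that
	 * lpfc_nvme_prep_io_dma() appends will terminate the list.
	 */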
	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;
	uint64_t segsum;

	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;

	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from Last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives an IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handed off to the NVME Layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
		seg1 = 0;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	segsum = seg2;
	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
	if (segsum > seg4)
		return;
	seg4 -= segsum;

	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif
/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the driver's hba data
 * @pwqeIn: Pointer to the command WQE that completed.
 * @wcqe: Pointer to the work completion queue entry.
 *
 * Completion handler for an NVME-over-FCP IO.  This routine translates the
 * WCQE status into the nvme_fc request completion fields and returns the
 * IO to the nvme_fc transport.
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NODE | LOG_NVME_IOERR,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
				 "nvmeCmd %p\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}

	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport && status) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6062 Ignoring NVME cmpl.  No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
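
		/*
		 * With the ERSP rebuilt above, report the IO as successful:
		 * the full payload length is treated as transferred and the
		 * response length is the fixed ERSP IU size.
		 */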
		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_NVME_IOERR,
						 "6032 Delay Aborted cmd %p "
						 "nvme cmd %p, xri x%x, "
						 "xb %d\n",
						 lpfc_ncmd, nCmd,
						 lpfc_ncmd->cur_iocbq.sli4_xritag,
						 bf_get(lpfc_wcqe_c_xb, wcqe));
			/* fall through */
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		uint32_t cpu;

		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			if (lpfc_ncmd->cpu != cpu)
				lpfc_printf_vlog(vport,
						 KERN_INFO, LOG_NVME_IOERR,
						 "6701 CPU Check cmpl: "
						 "cpu %d expect %d\n",
						 cpu, lpfc_ncmd->cpu);
			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
		}
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */
	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		spin_unlock(&lpfc_ncmd->buf_lock);
		nCmd->done(nCmd);
	} else
		spin_unlock(&lpfc_ncmd->buf_lock);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
 * @vport: Pointer to the driver's virtual port data
 * @lpfc_ncmd: Pointer to the driver's IO buffer for this request
 * @pnode: Pointer to the node (rport) this IO is addressed to
 * @cstat: Per hardware queue FC4 statistics to update
 *
 * This routine copies the appropriate WQE template into the command WQE
 * and fills in the request-specific fields from @lpfc_ncmd.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Node is not usable
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	if (!NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
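
			/*
			 * Word 5 carries the first-burst length.  When the
			 * rport negotiated first burst (NLP_FIRSTBURST) and
			 * cfg_nvme_enable_fb is set, write data up to
			 * nvme_fb_size may be sent with the command without
			 * waiting for a transfer-ready from the target.
			 */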
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}
/**
 * lpfc_nvme_prep_io_dma - Build the data SGL for an NVME-over-FCP IO
 * @vport: Pointer to the driver's virtual port data
 * @lpfc_ncmd: Pointer to the driver's IO buffer for this request
 *
 * This routine formats the scatter-gather list supplied by the nvme_fc
 * transport into the SLI4 SGEs (and optional PBDE) that describe the data
 * transfer to the adapter.
 *
 * Return value :
 *   0 - Success
 *   1 - Error
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
			num_bde++;
		}
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
			/* wqe_pbde is 1 in template */
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}
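
		/*
		 * With PBDE enabled the adapter is expected to use the first
		 * (or only) data buffer described directly in WQE words 13-15
		 * rather than fetching the SGL first; otherwise those words
		 * stay zero and the SGL alone describes the data.
		 */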
	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING) {
		ret = -ENODEV;
		goto out_fail;
	}

	if (vport->load_flag & FC_UNLOADING) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Fail IO, ndlp not ready: rport %p "
				 "ndlp %p, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->upcall_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;
	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
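	/*
	 * LPFC_FCP_SCHED_BY_HDWQ trusts the queue index the transport chose
	 * at create_queue time; the default policy instead maps the
	 * submitting CPU to its assigned hardware queue.
	 */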
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			lpfc_ncmd->cpu = cpu;
			if (idx != cpu)
				lpfc_printf_vlog(vport,
						 KERN_INFO, LOG_NVME_IOERR,
						 "6702 CPU Check cmd: "
						 "cpu %d wq %d\n",
						 cpu,
						 lpfc_queue_info->index);
			phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
		}
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}
/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to the abort WCQE.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
1702 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1703 * @lpfc_pnvme: Pointer to the driver's nvme instance data
1704 * @lpfc_nvme_lport: Pointer to the driver's local port data
1705 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
1706 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
1707 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1709 * Driver registers this routine as its nvme request io abort handler. This
1710 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
1711 * data structure to the rport indicated in @lpfc_nvme_rport. This routine
1712 * is executed asynchronously - one the target is validated as "MAPPED" and
1713 * ready for IO, the driver issues the abort request and returns.
1719 lpfc_nvme_fcp_abort(struct nvme_fc_local_port
*pnvme_lport
,
1720 struct nvme_fc_remote_port
*pnvme_rport
,
1721 void *hw_queue_handle
,
1722 struct nvmefc_fcp_req
*pnvme_fcreq
)
1724 struct lpfc_nvme_lport
*lport
;
1725 struct lpfc_vport
*vport
;
1726 struct lpfc_hba
*phba
;
1727 struct lpfc_io_buf
*lpfc_nbuf
;
1728 struct lpfc_iocbq
*abts_buf
;
1729 struct lpfc_iocbq
*nvmereq_wqe
;
1730 struct lpfc_nvme_fcpreq_priv
*freqpriv
;
1731 union lpfc_wqe128
*abts_wqe
;
1732 unsigned long flags
;
1735 /* Validate pointers. LLDD fault handling with transport does
1736 * have timing races.
1738 lport
= (struct lpfc_nvme_lport
*)pnvme_lport
->private;
1739 if (unlikely(!lport
))
1742 vport
= lport
->vport
;
1744 if (unlikely(!hw_queue_handle
)) {
1745 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NVME_ABTS
,
1746 "6129 Fail Abort, HW Queue Handle NULL.\n");
1751 freqpriv
= pnvme_fcreq
->private;
1753 if (unlikely(!freqpriv
))
1755 if (vport
->load_flag
& FC_UNLOADING
)
1758 /* Announce entry to new IO submit field. */
1759 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NVME_ABTS
,
1760 "6002 Abort Request to rport DID x%06x "
1761 "for nvme_fc_req %p\n",
1762 pnvme_rport
->port_id
,
1765 /* If the hba is getting reset, this flag is set. It is
1766 * cleared when the reset is complete and rings reestablished.
1768 spin_lock_irqsave(&phba
->hbalock
, flags
);
1769 /* driver queued commands are in process of being flushed */
1770 if (phba
->hba_flag
& HBA_NVME_IOQ_FLUSH
) {
1771 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
1772 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1773 "6139 Driver in reset cleanup - flushing "
1774 "NVME Req now. hba_flag x%x\n",
1779 lpfc_nbuf
= freqpriv
->nvme_buf
;
1781 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
1782 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1783 "6140 NVME IO req has no matching lpfc nvme "
1784 "io buffer. Skipping abort req.\n");
1786 } else if (!lpfc_nbuf
->nvmeCmd
) {
1787 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
1788 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1789 "6141 lpfc NVME IO req has no nvme_fcreq "
1790 "io buffer. Skipping abort req.\n");
1793 nvmereq_wqe
= &lpfc_nbuf
->cur_iocbq
;
1795 /* Guard against IO completion being called at same time */
1796 spin_lock(&lpfc_nbuf
->buf_lock
);
1799 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1800 * state must match the nvme_fcreq passed by the nvme
1801 * transport. If they don't match, it is likely the driver
1802 * has already completed the NVME IO and the nvme transport
1803 * has not seen it yet.
1805 if (lpfc_nbuf
->nvmeCmd
!= pnvme_fcreq
) {
1806 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1807 "6143 NVME req mismatch: "
1808 "lpfc_nbuf %p nvmeCmd %p, "
1809 "pnvme_fcreq %p. Skipping Abort xri x%x\n",
1810 lpfc_nbuf
, lpfc_nbuf
->nvmeCmd
,
1811 pnvme_fcreq
, nvmereq_wqe
->sli4_xritag
);
1815 /* Don't abort IOs no longer on the pending queue. */
1816 if (!(nvmereq_wqe
->iocb_flag
& LPFC_IO_ON_TXCMPLQ
)) {
1817 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1818 "6142 NVME IO req %p not queued - skipping "
1819 "abort req xri x%x\n",
1820 pnvme_fcreq
, nvmereq_wqe
->sli4_xritag
);
1824 atomic_inc(&lport
->xmt_fcp_abort
);
1825 lpfc_nvmeio_data(phba
, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1826 nvmereq_wqe
->sli4_xritag
,
1827 nvmereq_wqe
->hba_wqidx
, pnvme_rport
->port_id
);
1829 /* Outstanding abort is in progress */
1830 if (nvmereq_wqe
->iocb_flag
& LPFC_DRIVER_ABORTED
) {
1831 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1832 "6144 Outstanding NVME I/O Abort Request "
1833 "still pending on nvme_fcreq %p, "
1834 "lpfc_ncmd %p xri x%x\n",
1835 pnvme_fcreq
, lpfc_nbuf
,
1836 nvmereq_wqe
->sli4_xritag
);
1840 abts_buf
= __lpfc_sli_get_iocbq(phba
);
1842 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1843 "6136 No available abort wqes. Skipping "
1844 "Abts req for nvme_fcreq %p xri x%x\n",
1845 pnvme_fcreq
, nvmereq_wqe
->sli4_xritag
);
1849 /* Ready - mark outstanding as aborted by driver. */
1850 nvmereq_wqe
->iocb_flag
|= LPFC_DRIVER_ABORTED
;
1852 /* Complete prepping the abort wqe and issue to the FW. */
1853 abts_wqe
= &abts_buf
->wqe
;
1855 /* WQEs are reused. Clear stale data and set key fields to
1856 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1858 memset(abts_wqe
, 0, sizeof(union lpfc_wqe
));
1859 bf_set(abort_cmd_criteria
, &abts_wqe
->abort_cmd
, T_XRI_TAG
);
1862 bf_set(wqe_cmnd
, &abts_wqe
->abort_cmd
.wqe_com
, CMD_ABORT_XRI_CX
);
1863 bf_set(wqe_class
, &abts_wqe
->abort_cmd
.wqe_com
,
1864 nvmereq_wqe
->iocb
.ulpClass
);
1866 /* word 8 - tell the FW to abort the IO associated with this
1867 * outstanding exchange ID.
1869 abts_wqe
->abort_cmd
.wqe_com
.abort_tag
= nvmereq_wqe
->sli4_xritag
;
1871 /* word 9 - this is the iotag for the abts_wqe completion. */
1872 bf_set(wqe_reqtag
, &abts_wqe
->abort_cmd
.wqe_com
,
1876 bf_set(wqe_qosd
, &abts_wqe
->abort_cmd
.wqe_com
, 1);
1877 bf_set(wqe_lenloc
, &abts_wqe
->abort_cmd
.wqe_com
, LPFC_WQE_LENLOC_NONE
);
1880 bf_set(wqe_cmd_type
, &abts_wqe
->abort_cmd
.wqe_com
, OTHER_COMMAND
);
1881 bf_set(wqe_wqec
, &abts_wqe
->abort_cmd
.wqe_com
, 1);
1882 bf_set(wqe_cqid
, &abts_wqe
->abort_cmd
.wqe_com
, LPFC_WQE_CQ_ID_DEFAULT
);
1884 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1885 abts_buf
->iocb_flag
|= LPFC_IO_NVME
;
1886 abts_buf
->hba_wqidx
= nvmereq_wqe
->hba_wqidx
;
1887 abts_buf
->vport
= vport
;
1888 abts_buf
->wqe_cmpl
= lpfc_nvme_abort_fcreq_cmpl
;
1889 ret_val
= lpfc_sli4_issue_wqe(phba
, lpfc_nbuf
->hdwq
, abts_buf
);
1890 spin_unlock(&lpfc_nbuf
->buf_lock
);
1891 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
1893 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NVME_ABTS
,
1894 "6137 Failed abts issue_wqe with status x%x "
1895 "for nvme_fcreq %p.\n",
1896 ret_val
, pnvme_fcreq
);
1897 lpfc_sli_release_iocbq(phba
, abts_buf
);
1901 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NVME_ABTS
,
1902 "6138 Transport Abort NVME Request Issued for "
1903 "ox_id x%x on reqtag x%x\n",
1904 nvmereq_wqe
->sli4_xritag
,
1909 spin_unlock(&lpfc_nbuf
->buf_lock
);
1910 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
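/*
 * The max_hw_queues and max_sgl_segments values above are placeholders;
 * lpfc_nvme_create_localport() overwrites them from the per-HBA
 * configuration (cfg_hdw_queue / num_present_cpu and cfg_nvme_seg_cnt + 1)
 * before the template is handed to nvme_fc_register_localport().
 */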
/**
 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a nvme buffer from head of @hdwq io_buf_list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Setup key fields in buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->iocb_flag = LPFC_IO_NVME;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}
	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}
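/*
 * Illustrative pairing only (not a driver entry point): a submit path that
 * obtains a buffer here is expected to hand it back through
 * lpfc_release_nvme_buf() if the WQE is never successfully issued, e.g.
 *
 *	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
 *	if (!lpfc_ncmd)
 *		return -EBUSY;		(hypothetical caller policy)
 *	... build and issue the WQE ...
 *	if (issue failed)
 *		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 */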
/**
 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
 * lpfc_io_buf_list list. For SLI4, XRIs are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_nvme_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
	}
}
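/*
 * Buffers parked on lpfc_abts_nvme_buf_list above (LPFC_SBUF_XBUSY set) are
 * not lost: lpfc_sli4_nvme_xri_aborted() later in this file pulls them off
 * that list when the XRI-aborted CQE arrives and releases them again with
 * the XBUSY flag cleared.
 */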
/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @pvport - the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport.  It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time.  Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba  *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	/* Advertise how many hw queues we support based on fcp_io_sched */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
		lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
	else
		lpfc_nvme_template.max_hw_queues =
			phba->sli4_hba.num_present_cpu;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP %p, private %p, "
				 "sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);
		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}
#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources.  Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6176 Lport %p Localport %p wait "
					 "timed out. Renewing.\n",
					 lport, vport->localport);
		} else {
			break;
		}
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport %p Localport %p Complete Success\n",
			 lport, vport->localport);
}
#endif
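/*
 * The completion passed to lpfc_nvme_lport_unreg_wait() is the on-stack
 * lport_unreg_cmp that lpfc_nvme_destroy_localport() stores in
 * lport->lport_unreg_cmp before calling nvme_fc_unregister_localport();
 * the transport's localport_delete upcall (bound to
 * lpfc_nvme_localport_delete in the template) is expected to complete() it.
 */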
/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @pnvme: pointer to lpfc nvme data structure.
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there.  This routine ensures all rports bound to the
 * lport have been disconnected.
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport %p\n",
			 localport);

	/* lport's rport list is clear.  Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion.  This either blocks
	 * indefinitely or succeeds
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support.  All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (ret == 0)
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	else
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
#endif
}
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP %p, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport %p did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport %p to DID x%06x\n",
			 lport, localport->port_id);
#endif
}
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance.  Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults.  The
	 * driver communicates port role capabilities consistent
	 * with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);

	spin_lock_irq(&vport->phba->hbalock);
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	spin_unlock_irq(&vport->phba->hbalock);
	if (!oldrport)
		lpfc_nlp_get(ndlp);

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport.  Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
		spin_unlock_irq(&vport->phba->hbalock);
		rport = remote_port->private;
		if (oldrport) {
			/* New remoteport record does not guarantee valid
			 * host private memory area.
			 */
			prev_ndlp = oldrport->ndlp;
			if (oldrport == remote_port->private) {
				/* Same remoteport - ndlp should match.
				 * Just reuse.
				 */
				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
						 LOG_NVME_DISC,
						 "6014 Rebinding lport to "
						 "remoteport %p wwpn 0x%llx, "
						 "Data: x%x x%x %p %p x%x x%06x\n",
						 remote_port,
						 remote_port->port_name,
						 remote_port->port_id,
						 remote_port->port_role,
						 oldrport->ndlp,
						 ndlp,
						 ndlp->nlp_type,
						 ndlp->nlp_DID);
				return 0;
			}

			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&vport->phba->hbalock);
			ndlp->nrport = NULL;
			ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
			spin_unlock_irq(&vport->phba->hbalock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no longer
			 * active. It might be just a swap and removing the
			 * reference would cause a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
				    (!prev_ndlp->nrport))
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->nrport = rport;
		spin_unlock_irq(&vport->phba->hbalock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Binding new rport to "
				 "lport %p Remoteport %p rport %p WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NVME_DISC | LOG_NODE,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued.  Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss.  Node ref count is
 * only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is a fundamental error.  The localport is always
	 * available until driver unload.  Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&vport->phba->hbalock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&vport->phba->hbalock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport %p, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type);

	/* Sanity check ndlp type.  Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored.  The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);
		if (ret != 0) {
			lpfc_nlp_put(ndlp);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
			 "6168 State error: lport %p, rport %p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
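/*
 * NLP_WAIT_FOR_UNREG is set above before nvme_fc_unregister_remoteport();
 * lpfc_nvme_register_port() clears it again on a successful re-register to
 * guard against an unregister/reregister race leaving the flag stuck.
 */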
/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_nvme_buf_list_lock);
	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
				 &qp->lpfc_abts_nvme_buf_list, list) {
		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&lpfc_ncmd->list);
			qp->abts_nvme_io_bufs--;
			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			spin_unlock(&qp->abts_nvme_buf_list_lock);

			spin_unlock_irqrestore(&phba->hbalock, iflag);
			ndlp = lpfc_ncmd->ndlp;
			if (ndlp)
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6311 nvme_cmd %p xri x%x tag x%x "
					"abort complete and xri released\n",
					lpfc_ncmd->nvmeCmd, xri,
					lpfc_ncmd->cur_iocbq.iotag);

			/* Aborted NVME commands are required to not complete
			 * before the abort exchange command fully completes.
			 * Once completed, it is available via the put list.
			 */
			if (lpfc_ncmd->nvmeCmd) {
				nvme_cmd = lpfc_ncmd->nvmeCmd;
				nvme_cmd->done(nvme_cmd);
				lpfc_ncmd->nvmeCmd = NULL;
			}
			lpfc_release_nvme_buf(phba, lpfc_ncmd);
			return;
		}
	}
	spin_unlock(&qp->abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6312 XRI Aborted xri x%x not found\n", xri);
}
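/*
 * Ordering note: for commands parked on the abts list, the done() upcall to
 * the NVME transport happens from the XRI-aborted handler above, after the
 * ABTS exchange has fully completed; only then is the io_buf recycled via
 * lpfc_release_nvme_buf().
 */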
/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring  *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all NVME rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].nvme_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10mS.  Every ten seconds,
			 * dump a message.  Something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt