1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_nvme.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_logmsg.h"
53 #include "lpfc_crtn.h"
54 #include "lpfc_vport.h"
55 #include "lpfc_debugfs.h"
57 /* NVME initiator-based functions */
59 static struct lpfc_nvme_buf *
60 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
61 int expedite);
63 static void
64 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
66 static struct nvme_fc_port_template lpfc_nvme_template;
68 static union lpfc_wqe128 lpfc_iread_cmd_template;
69 static union lpfc_wqe128 lpfc_iwrite_cmd_template;
70 static union lpfc_wqe128 lpfc_icmnd_cmd_template;
72 /* Setup WQE templates for NVME IOs */
73 void
74 lpfc_nvme_cmd_template(void)
76 union lpfc_wqe128 *wqe;
78 /* IREAD template */
79 wqe = &lpfc_iread_cmd_template;
80 memset(wqe, 0, sizeof(union lpfc_wqe128));
82 /* Word 0, 1, 2 - BDE is variable */
84 /* Word 3 - cmd_buff_len, payload_offset_len is zero */
86 /* Word 4 - total_xfer_len is variable */
88 /* Word 5 - is zero */
90 /* Word 6 - ctxt_tag, xri_tag is variable */
92 /* Word 7 */
93 bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
94 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
95 bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
96 bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
98 /* Word 8 - abort_tag is variable */
100 /* Word 9 - reqtag is variable */
102 /* Word 10 - dbde, wqes is variable */
103 bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
104 bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
105 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
106 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
107 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
108 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
110 /* Word 11 - pbde is variable */
111 bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
112 bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
113 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
115 /* Word 12 - is zero */
117 /* Word 13, 14, 15 - PBDE is variable */
119 /* IWRITE template */
120 wqe = &lpfc_iwrite_cmd_template;
121 memset(wqe, 0, sizeof(union lpfc_wqe128));
123 /* Word 0, 1, 2 - BDE is variable */
125 /* Word 3 - cmd_buff_len, payload_offset_len is zero */
127 /* Word 4 - total_xfer_len is variable */
129 /* Word 5 - initial_xfer_len is variable */
131 /* Word 6 - ctxt_tag, xri_tag is variable */
133 /* Word 7 */
134 bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
135 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
136 bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
137 bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
139 /* Word 8 - abort_tag is variable */
141 /* Word 9 - reqtag is variable */
143 /* Word 10 - dbde, wqes is variable */
144 bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
145 bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
146 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
147 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
148 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
149 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
151 /* Word 11 - pbde is variable */
152 bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
153 bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
154 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
156 /* Word 12 - is zero */
158 /* Word 13, 14, 15 - PBDE is variable */
160 /* ICMND template */
161 wqe = &lpfc_icmnd_cmd_template;
162 memset(wqe, 0, sizeof(union lpfc_wqe128));
164 /* Word 0, 1, 2 - BDE is variable */
166 /* Word 3 - payload_offset_len is variable */
168 /* Word 4, 5 - is zero */
170 /* Word 6 - ctxt_tag, xri_tag is variable */
172 /* Word 7 */
173 bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
174 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
175 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
176 bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
178 /* Word 8 - abort_tag is variable */
180 /* Word 9 - reqtag is variable */
182 /* Word 10 - dbde, wqes is variable */
183 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
184 bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
185 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
186 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
187 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
188 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
190 /* Word 11 */
191 bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
192 bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
193 bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
195 /* Word 12, 13, 14, 15 - is zero */
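/*
 * Example (sketch of how these templates are consumed): at IO prep time
 * the constant words are block-copied into the live WQE and the per-IO
 * fields are then patched in, as lpfc_nvme_prep_io_cmd() below does for
 * each IO type:
 *
 *	memcpy(&wqe->words[7], &lpfc_iread_cmd_template.words[7],
 *	       sizeof(uint32_t) * 5);
 *	wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
 *	bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, pwqeq->sli4_xritag);
 */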
199 * lpfc_nvme_create_queue - Create and bind an NVME IO queue handle
200 * @lpfc_pnvme: Pointer to the driver's nvme instance data
201 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
202 * @handle: An opaque driver handle used in follow-up calls.
204 * Driver registers this routine to preallocate and initialize any
205 * internal data structures to bind the @qidx to its internal IO queues.
206 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
208 * Return value :
209 * 0 - Success
210 * -EINVAL - Unsupported input value.
211 * -ENOMEM - Could not alloc necessary memory
213 static int
214 lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
215 unsigned int qidx, u16 qsize,
216 void **handle)
218 struct lpfc_nvme_lport *lport;
219 struct lpfc_vport *vport;
220 struct lpfc_nvme_qhandle *qhandle;
221 char *str;
223 if (!pnvme_lport->private)
224 return -ENOMEM;
226 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
227 vport = lport->vport;
228 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
229 if (qhandle == NULL)
230 return -ENOMEM;
232 qhandle->cpu_id = smp_processor_id();
233 qhandle->qidx = qidx;
235 * NVME qidx == 0 is the admin queue, so both admin queue
236 * and first IO queue will use MSI-X vector and associated
237 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
239 if (qidx) {
240 str = "IO "; /* IO queue */
241 qhandle->index = ((qidx - 1) %
242 vport->phba->cfg_nvme_io_channel);
243 } else {
244 str = "ADM"; /* Admin queue */
245 qhandle->index = qidx;
248 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
249 "6073 Binding %s HdwQueue %d (cpu %d) to "
250 "io_channel %d qhandle %p\n", str,
251 qidx, qhandle->cpu_id, qhandle->index, qhandle);
252 *handle = (void *)qhandle;
253 return 0;
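/*
 * Worked example of the mapping above (assuming cfg_nvme_io_channel is
 * 4): qidx 0 is the admin queue and uses index 0; IO queues qidx 1-4
 * map to index = (qidx - 1) % 4 = 0-3; qidx 5 wraps back to index 0.
 * The admin queue therefore shares MSI-X vector/EQ/CQ/WQ 0 with the
 * first IO queue:
 *
 *	qhandle->index = qidx ? (qidx - 1) % cfg_nvme_io_channel : 0;
 */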
257 * lpfc_nvme_delete_queue - Free an NVME IO queue handle
258 * @lpfc_pnvme: Pointer to the driver's nvme instance data
259 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
260 * @handle: An opaque driver handle from lpfc_nvme_create_queue
262 * Driver registers this routine to free
263 * any internal data structures to bind the @qidx to its internal
264 * IO queues.
266 * Return value :
267 * None
270 static void
271 lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
272 unsigned int qidx,
273 void *handle)
275 struct lpfc_nvme_lport *lport;
276 struct lpfc_vport *vport;
278 if (!pnvme_lport->private)
279 return;
281 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
282 vport = lport->vport;
284 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
285 "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
286 lport, qidx, handle);
287 kfree(handle);
290 static void
291 lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
293 struct lpfc_nvme_lport *lport = localport->private;
295 lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
296 "6173 localport %p delete complete\n",
297 lport);
299 /* release any threads waiting for the unreg to complete */
300 if (lport->vport->localport)
301 complete(lport->lport_unreg_cmp);
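/*
 * Sketch of the waiting side (illustrative only; the actual unregister
 * path lives elsewhere in the driver): the thread tearing down the
 * localport arms lport_unreg_cmp and blocks until the transport makes
 * the delete downcall above, e.g.:
 *
 *	DECLARE_COMPLETION_ONSTACK(unreg_done);
 *	lport->lport_unreg_cmp = &unreg_done;
 *	nvme_fc_unregister_localport(localport);
 *	wait_for_completion(&unreg_done);
 */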
304 /* lpfc_nvme_remoteport_delete
306 * @remoteport: Pointer to an nvme transport remoteport instance.
308 * This is a template downcall. NVME transport calls this function
309 * when it has completed the unregistration of a previously
310 * registered remoteport.
312 * Return value :
313 * None
315 void
316 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
318 struct lpfc_nvme_rport *rport = remoteport->private;
319 struct lpfc_vport *vport;
320 struct lpfc_nodelist *ndlp;
322 ndlp = rport->ndlp;
323 if (!ndlp)
324 goto rport_err;
326 vport = ndlp->vport;
327 if (!vport)
328 goto rport_err;
330 /* Remove this rport from the lport's list - memory is owned by the
331 * transport. Remove the ndlp reference for the NVME transport before
332 * calling state machine to remove the node.
334 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
335 "6146 remoteport delete of remoteport %p\n",
336 remoteport);
337 spin_lock_irq(&vport->phba->hbalock);
339 /* The register rebind might have occurred before the delete
340 * downcall. Guard against this race.
342 if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
343 ndlp->nrport = NULL;
344 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
346 spin_unlock_irq(&vport->phba->hbalock);
348 /* Remove original register reference. The host transport
349 * won't reference this rport/remoteport any further.
351 lpfc_nlp_put(ndlp);
353 rport_err:
354 return;
357 static void
358 lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
359 struct lpfc_wcqe_complete *wcqe)
361 struct lpfc_vport *vport = cmdwqe->vport;
362 struct lpfc_nvme_lport *lport;
363 uint32_t status;
364 struct nvmefc_ls_req *pnvme_lsreq;
365 struct lpfc_dmabuf *buf_ptr;
366 struct lpfc_nodelist *ndlp;
368 pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
369 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
371 if (vport->localport) {
372 lport = (struct lpfc_nvme_lport *)vport->localport->private;
373 if (lport) {
374 atomic_inc(&lport->fc4NvmeLsCmpls);
375 if (status) {
376 if (bf_get(lpfc_wcqe_c_xb, wcqe))
377 atomic_inc(&lport->cmpl_ls_xb);
378 atomic_inc(&lport->cmpl_ls_err);
383 ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
384 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
385 "6047 nvme cmpl Enter "
386 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
387 "lsreg:%p bmp:%p ndlp:%p\n",
388 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
389 cmdwqe->sli4_xritag, status,
390 (wcqe->parameter & 0xffff),
391 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
393 lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
394 cmdwqe->sli4_xritag, status, wcqe->parameter);
396 if (cmdwqe->context3) {
397 buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
398 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
399 kfree(buf_ptr);
400 cmdwqe->context3 = NULL;
402 if (pnvme_lsreq->done)
403 pnvme_lsreq->done(pnvme_lsreq, status);
404 else
405 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
406 "6046 nvme cmpl without done call back? "
407 "Data %p DID %x Xri: %x status %x\n",
408 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
409 cmdwqe->sli4_xritag, status);
410 if (ndlp) {
411 lpfc_nlp_put(ndlp);
412 cmdwqe->context1 = NULL;
414 lpfc_sli_release_iocbq(phba, cmdwqe);
417 static int
418 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
419 struct lpfc_dmabuf *inp,
420 struct nvmefc_ls_req *pnvme_lsreq,
421 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
422 struct lpfc_wcqe_complete *),
423 struct lpfc_nodelist *ndlp, uint32_t num_entry,
424 uint32_t tmo, uint8_t retry)
426 struct lpfc_hba *phba = vport->phba;
427 union lpfc_wqe128 *wqe;
428 struct lpfc_iocbq *genwqe;
429 struct ulp_bde64 *bpl;
430 struct ulp_bde64 bde;
431 int i, rc, xmit_len, first_len;
433 /* Allocate buffer for command WQE */
434 genwqe = lpfc_sli_get_iocbq(phba);
435 if (genwqe == NULL)
436 return 1;
438 wqe = &genwqe->wqe;
439 memset(wqe, 0, sizeof(union lpfc_wqe));
441 genwqe->context3 = (uint8_t *)bmp;
442 genwqe->iocb_flag |= LPFC_IO_NVME_LS;
444 /* Save for completion so we can release these resources */
445 genwqe->context1 = lpfc_nlp_get(ndlp);
446 genwqe->context2 = (uint8_t *)pnvme_lsreq;
447 /* Fill in payload, bp points to frame payload */
449 if (!tmo)
450 /* FC spec states we need 3 * ratov for CT requests */
451 tmo = (3 * phba->fc_ratov);
453 /* For this command calculate the xmit length of the request bde. */
454 xmit_len = 0;
455 first_len = 0;
456 bpl = (struct ulp_bde64 *)bmp->virt;
457 for (i = 0; i < num_entry; i++) {
458 bde.tus.w = bpl[i].tus.w;
459 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
460 break;
461 xmit_len += bde.tus.f.bdeSize;
462 if (i == 0)
463 first_len = xmit_len;
466 genwqe->rsvd2 = num_entry;
467 genwqe->hba_wqidx = 0;
469 /* Words 0 - 2 */
470 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
471 wqe->generic.bde.tus.f.bdeSize = first_len;
472 wqe->generic.bde.addrLow = bpl[0].addrLow;
473 wqe->generic.bde.addrHigh = bpl[0].addrHigh;
475 /* Word 3 */
476 wqe->gen_req.request_payload_len = first_len;
478 /* Word 4 */
480 /* Word 5 */
481 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
482 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
483 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
484 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
485 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
487 /* Word 6 */
488 bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
489 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
490 bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
492 /* Word 7 */
493 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
494 bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
495 bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
496 bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
498 /* Word 8 */
499 wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
501 /* Word 9 */
502 bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
504 /* Word 10 */
505 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
506 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
507 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
508 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
509 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
511 /* Word 11 */
512 bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
513 bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
516 /* Issue GEN REQ WQE for NPORT <did> */
517 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
518 "6050 Issue GEN REQ WQE to NPORT x%x "
519 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
520 ndlp->nlp_DID, genwqe->iotag,
521 vport->port_state,
522 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
523 genwqe->wqe_cmpl = cmpl;
524 genwqe->iocb_cmpl = NULL;
525 genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
526 genwqe->vport = vport;
527 genwqe->retry = retry;
529 lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
530 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
532 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
533 if (rc) {
534 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
535 "6045 Issue GEN REQ WQE to NPORT x%x "
536 "Data: x%x x%x\n",
537 ndlp->nlp_DID, genwqe->iotag,
538 vport->port_state);
539 lpfc_sli_release_iocbq(phba, genwqe);
540 return 1;
542 return 0;
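/*
 * Timeout layering note: the WQE timeout above is fc_ratov - 1, while
 * the driver-level timer is armed at tmo + LPFC_DRVR_TIMEOUT (with tmo
 * defaulting to 3 * ratov per the FC spec for CT requests). As read
 * from these values, the intent is for the firmware to time the
 * exchange out well before the driver-level timer ever fires.
 */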
546 * lpfc_nvme_ls_req - Issue a Link Service request
547 * @lpfc_pnvme: Pointer to the driver's nvme instance data
548 * @lpfc_nvme_lport: Pointer to the driver's local port data
549 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
551 * Driver registers this routine to handle any link service request
552 * from the nvme_fc transport to a remote nvme-aware port.
554 * Return value :
555 * 0 - Success
556 * TODO: What are the failure codes.
558 static int
559 lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
560 struct nvme_fc_remote_port *pnvme_rport,
561 struct nvmefc_ls_req *pnvme_lsreq)
563 int ret = 0;
564 struct lpfc_nvme_lport *lport;
565 struct lpfc_nvme_rport *rport;
566 struct lpfc_vport *vport;
567 struct lpfc_nodelist *ndlp;
568 struct ulp_bde64 *bpl;
569 struct lpfc_dmabuf *bmp;
570 uint16_t ntype, nstate;
572 /* There are two dma bufs in the request; actually there is one and
573 * the second one is just the start address + cmd size.
574 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
575 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
576 * because the nvme layer owns the data bufs.
577 * We do not have to break these packets open, we don't care what is in
578 * them. And we do not have to look at the response data, we only care
579 * that we got a response. All of the caring is going to happen in the
580 * nvme-fc layer.
583 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
584 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
585 if (unlikely(!lport) || unlikely(!rport))
586 return -EINVAL;
588 vport = lport->vport;
590 if (vport->load_flag & FC_UNLOADING)
591 return -ENODEV;
593 /* Need the ndlp. It is stored in the driver's rport. */
594 ndlp = rport->ndlp;
595 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
596 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
597 "6051 Remoteport %p, rport has invalid ndlp. "
598 "Failing LS Req\n", pnvme_rport);
599 return -ENODEV;
602 /* The remote node has to be a mapped nvme target or an
603 * unmapped nvme initiator or it's an error.
605 ntype = ndlp->nlp_type;
606 nstate = ndlp->nlp_state;
607 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
608 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
609 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
610 "6088 DID x%06x not ready for "
611 "IO. State x%x, Type x%x\n",
612 pnvme_rport->port_id,
613 ndlp->nlp_state, ndlp->nlp_type);
614 return -ENODEV;
616 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
617 if (!bmp) {
619 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
620 "6044 Could not find node for DID %x\n",
621 pnvme_rport->port_id);
622 return 2;
624 INIT_LIST_HEAD(&bmp->list);
625 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
626 if (!bmp->virt) {
627 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
628 "6042 Could not find node for DID %x\n",
629 pnvme_rport->port_id);
630 kfree(bmp);
631 return 3;
633 bpl = (struct ulp_bde64 *)bmp->virt;
634 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
635 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
636 bpl->tus.f.bdeFlags = 0;
637 bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
638 bpl->tus.w = le32_to_cpu(bpl->tus.w);
639 bpl++;
641 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
642 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
643 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
644 bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
645 bpl->tus.w = le32_to_cpu(bpl->tus.w);
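/*
 * Resulting BPL layout (sketch): entry 0 is a 64-bit BDE describing the
 * LS request payload (rqstlen bytes at rqstdma); entry 1 is a
 * BUFF_TYPE_BDE_64I input BDE describing the response buffer (rsplen
 * bytes at rspdma). lpfc_nvme_gen_req() walks these entries to compute
 * the xmit length for the GEN_REQUEST64 WQE.
 */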
647 /* Expand print to include key fields. */
648 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
649 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
650 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
651 ndlp->nlp_DID,
652 pnvme_lport, pnvme_rport,
653 pnvme_lsreq, pnvme_lsreq->rqstlen,
654 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
655 &pnvme_lsreq->rspdma);
657 atomic_inc(&lport->fc4NvmeLsRequests);
659 /* Hardcode the wait to 30 seconds. Shorter timeouts have caused
660 * connection failures, so this value keeps LS requests working.
662 ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
663 pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
664 ndlp, 2, 30, 0);
665 if (ret != WQE_SUCCESS) {
666 atomic_inc(&lport->xmt_ls_err);
667 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
668 "6052 EXIT. issue ls wqe failed lport %p, "
669 "rport %p lsreq%p Status %x DID %x\n",
670 pnvme_lport, pnvme_rport, pnvme_lsreq,
671 ret, ndlp->nlp_DID);
672 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
673 kfree(bmp);
674 return ret;
678 return ret;
682 * lpfc_nvme_ls_abort - Abort a prior Link Service request
683 * @lpfc_pnvme: Pointer to the driver's nvme instance data
684 * @lpfc_nvme_lport: Pointer to the driver's local port data
685 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
687 * Driver registers this routine to abort a previously issued link
688 * service request to a remote nvme-aware port.
690 * Return value :
691 * None
694 static void
695 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
696 struct nvme_fc_remote_port *pnvme_rport,
697 struct nvmefc_ls_req *pnvme_lsreq)
699 struct lpfc_nvme_lport *lport;
700 struct lpfc_vport *vport;
701 struct lpfc_hba *phba;
702 struct lpfc_nodelist *ndlp;
703 LIST_HEAD(abort_list);
704 struct lpfc_sli_ring *pring;
705 struct lpfc_iocbq *wqe, *next_wqe;
707 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
708 if (unlikely(!lport))
709 return;
710 vport = lport->vport;
711 phba = vport->phba;
713 if (vport->load_flag & FC_UNLOADING)
714 return;
716 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
717 if (!ndlp) {
718 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
719 "6049 Could not find node for DID %x\n",
720 pnvme_rport->port_id);
721 return;
724 /* Expand print to include key fields. */
725 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
726 "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
727 "rsplen:%d %pad %pad\n",
728 pnvme_lport, pnvme_rport,
729 pnvme_lsreq, pnvme_lsreq->rqstlen,
730 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
731 &pnvme_lsreq->rspdma);
734 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
735 * that need an ABTS. The IOs need to stay on the txcmplq so that
736 * the abort operation completes them successfully.
738 pring = phba->sli4_hba.nvmels_wq->pring;
739 spin_lock_irq(&phba->hbalock);
740 spin_lock(&pring->ring_lock);
741 list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
742 /* Add to abort_list on NDLP match. */
743 if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
744 wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
745 list_add_tail(&wqe->dlist, &abort_list);
748 spin_unlock(&pring->ring_lock);
749 spin_unlock_irq(&phba->hbalock);
751 /* Abort the targeted IOs and remove them from the abort list. */
752 list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
753 atomic_inc(&lport->xmt_ls_abort);
754 spin_lock_irq(&phba->hbalock);
755 list_del_init(&wqe->dlist);
756 lpfc_sli_issue_abort_iotag(phba, pring, wqe);
757 spin_unlock_irq(&phba->hbalock);
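/*
 * Two-phase abort pattern used above: pass 1 walks the NVME LS txcmplq
 * under hbalock + ring_lock, marking each NDLP match
 * LPFC_DRIVER_ABORTED and collecting it on a private dlist; pass 2
 * re-takes hbalock per entry and issues the ABTS via
 * lpfc_sli_issue_abort_iotag(). The IOs stay on the txcmplq so the
 * abort completion can reap them normally.
 */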
761 /* Fix up the existing sgls for NVME IO. */
762 static inline void
763 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
764 struct lpfc_nvme_buf *lpfc_ncmd,
765 struct nvmefc_fcp_req *nCmd)
767 struct lpfc_hba *phba = vport->phba;
768 struct sli4_sge *sgl;
769 union lpfc_wqe128 *wqe;
770 uint32_t *wptr, *dptr;
773 * Get a local pointer to the built-in wqe and correct
774 * the cmd size to match NVME's 96 bytes and fix
775 * the dma address.
778 wqe = &lpfc_ncmd->cur_iocbq.wqe;
781 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
782 * match NVME. NVME sends 96 bytes. Also, use the
783 * nvme command's command and response dma addresses
784 * rather than the virtual memory to ease the restore
785 * operation.
787 sgl = lpfc_ncmd->nvme_sgl;
788 sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
789 if (phba->cfg_nvme_embed_cmd) {
790 sgl->addr_hi = 0;
791 sgl->addr_lo = 0;
793 /* Word 0-2 - NVME CMND IU (embedded payload) */
794 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
795 wqe->generic.bde.tus.f.bdeSize = 56;
796 wqe->generic.bde.addrHigh = 0;
797 wqe->generic.bde.addrLow = 64; /* Word 16 */
799 /* Word 10 - dbde is 0, wqes is 1 in template */
802 * Embed the payload in the last half of the WQE
803 * WQE words 16-30 get the NVME CMD IU payload
805 * WQE words 16-19 get payload Words 1-4
806 * WQE words 20-21 get payload Words 6-7
807 * WQE words 22-29 get payload Words 16-23
809 wptr = &wqe->words[16]; /* WQE ptr */
810 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
811 dptr++; /* Skip Word 0 in payload */
813 *wptr++ = *dptr++; /* Word 1 */
814 *wptr++ = *dptr++; /* Word 2 */
815 *wptr++ = *dptr++; /* Word 3 */
816 *wptr++ = *dptr++; /* Word 4 */
817 dptr++; /* Skip Word 5 in payload */
818 *wptr++ = *dptr++; /* Word 6 */
819 *wptr++ = *dptr++; /* Word 7 */
820 dptr += 8; /* Skip Words 8-15 in payload */
821 *wptr++ = *dptr++; /* Word 16 */
822 *wptr++ = *dptr++; /* Word 17 */
823 *wptr++ = *dptr++; /* Word 18 */
824 *wptr++ = *dptr++; /* Word 19 */
825 *wptr++ = *dptr++; /* Word 20 */
826 *wptr++ = *dptr++; /* Word 21 */
827 *wptr++ = *dptr++; /* Word 22 */
828 *wptr = *dptr; /* Word 23 */
829 } else {
830 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
831 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
833 /* Word 0-2 - NVME CMND IU Inline BDE */
834 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
835 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
836 wqe->generic.bde.addrHigh = sgl->addr_hi;
837 wqe->generic.bde.addrLow = sgl->addr_lo;
839 /* Word 10 */
840 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
841 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
844 sgl++;
846 /* Setup the physical region for the FCP RSP */
847 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
848 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
849 sgl->word2 = le32_to_cpu(sgl->word2);
850 if (nCmd->sg_cnt)
851 bf_set(lpfc_sli4_sge_last, sgl, 0);
852 else
853 bf_set(lpfc_sli4_sge_last, sgl, 1);
854 sgl->word2 = cpu_to_le32(sgl->word2);
855 sgl->sge_len = cpu_to_le32(nCmd->rsplen);
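/*
 * Net effect (sketch): sgl[0] now describes the NVME CMD IU - either
 * embedded in WQE words 16-30 (cfg_nvme_embed_cmd) or pointed to by
 * cmddma - and sgl[1] describes the response IU at rspdma. sgl[1] is
 * marked as the last SGE only when no data SGEs follow (sg_cnt == 0).
 */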
858 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
859 static void
860 lpfc_nvme_ktime(struct lpfc_hba *phba,
861 struct lpfc_nvme_buf *lpfc_ncmd)
863 uint64_t seg1, seg2, seg3, seg4;
864 uint64_t segsum;
866 if (!lpfc_ncmd->ts_last_cmd ||
867 !lpfc_ncmd->ts_cmd_start ||
868 !lpfc_ncmd->ts_cmd_wqput ||
869 !lpfc_ncmd->ts_isr_cmpl ||
870 !lpfc_ncmd->ts_data_nvme)
871 return;
873 if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
874 return;
875 if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
876 return;
877 if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
878 return;
879 if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
880 return;
881 if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
882 return;
884 * Segment 1 - Time from Last FCP command cmpl is handed
885 * off to NVME Layer to start of next command.
886 * Segment 2 - Time from Driver receives an IO cmd start
887 * from NVME Layer to WQ put is done on IO cmd.
888 * Segment 3 - Time from Driver WQ put is done on IO cmd
889 * to MSI-X ISR for IO cmpl.
890 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
891 * cmpl is handed off to the NVME Layer.
893 seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
894 if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
895 seg1 = 0;
897 /* Calculate times relative to start of IO */
898 seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
899 segsum = seg2;
900 seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
901 if (segsum > seg3)
902 return;
903 seg3 -= segsum;
904 segsum += seg3;
906 seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
907 if (segsum > seg4)
908 return;
909 seg4 -= segsum;
911 phba->ktime_data_samples++;
912 phba->ktime_seg1_total += seg1;
913 if (seg1 < phba->ktime_seg1_min)
914 phba->ktime_seg1_min = seg1;
915 else if (seg1 > phba->ktime_seg1_max)
916 phba->ktime_seg1_max = seg1;
917 phba->ktime_seg2_total += seg2;
918 if (seg2 < phba->ktime_seg2_min)
919 phba->ktime_seg2_min = seg2;
920 else if (seg2 > phba->ktime_seg2_max)
921 phba->ktime_seg2_max = seg2;
922 phba->ktime_seg3_total += seg3;
923 if (seg3 < phba->ktime_seg3_min)
924 phba->ktime_seg3_min = seg3;
925 else if (seg3 > phba->ktime_seg3_max)
926 phba->ktime_seg3_max = seg3;
927 phba->ktime_seg4_total += seg4;
928 if (seg4 < phba->ktime_seg4_min)
929 phba->ktime_seg4_min = seg4;
930 else if (seg4 > phba->ktime_seg4_max)
931 phba->ktime_seg4_max = seg4;
933 lpfc_ncmd->ts_last_cmd = 0;
934 lpfc_ncmd->ts_cmd_start = 0;
935 lpfc_ncmd->ts_cmd_wqput = 0;
936 lpfc_ncmd->ts_isr_cmpl = 0;
937 lpfc_ncmd->ts_data_nvme = 0;
939 #endif
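/*
 * Worked example for the segment math above (all values in ns): with
 * ts_last_cmd = 100, ts_cmd_start = 150, ts_cmd_wqput = 170,
 * ts_isr_cmpl = 400 and ts_data_nvme = 420:
 *
 *	seg1 = 150 - 100         = 50  (idle gap before this IO)
 *	seg2 = 170 - 150         = 20  (driver submit time)
 *	seg3 = (400 - 150) - 20  = 230 (hardware + ISR latency)
 *	seg4 = (420 - 150) - 250 = 20  (completion handoff to NVME)
 */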
942 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
943 * @phba: Pointer to HBA context object
944 * @pwqeIn: Pointer to the command WQE that completed
945 * @wcqe: Pointer to the work queue completion entry
947 * Driver's IO completion handler. The SLI layer calls this routine
948 * when the WQE posted for an NVME IO completes; it unpacks the WCQE
949 * and hands the result back to the nvme_fc transport.
951 * Return value :
952 * None
955 static void
956 lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
957 struct lpfc_wcqe_complete *wcqe)
959 struct lpfc_nvme_buf *lpfc_ncmd =
960 (struct lpfc_nvme_buf *)pwqeIn->context1;
961 struct lpfc_vport *vport = pwqeIn->vport;
962 struct nvmefc_fcp_req *nCmd;
963 struct nvme_fc_ersp_iu *ep;
964 struct nvme_fc_cmd_iu *cp;
965 struct lpfc_nvme_rport *rport;
966 struct lpfc_nodelist *ndlp;
967 struct lpfc_nvme_fcpreq_priv *freqpriv;
968 struct lpfc_nvme_lport *lport;
969 struct lpfc_nvme_ctrl_stat *cstat;
970 unsigned long flags;
971 uint32_t code, status, idx;
972 uint16_t cid, sqhd, data;
973 uint32_t *ptr;
975 /* Sanity check on return of outstanding command */
976 if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
977 if (!lpfc_ncmd) {
978 lpfc_printf_vlog(vport, KERN_ERR,
979 LOG_NODE | LOG_NVME_IOERR,
980 "6071 Null lpfc_ncmd pointer. No "
981 "release, skip completion\n");
982 return;
985 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
986 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
987 "nvmeCmd %p nrport %p\n",
988 lpfc_ncmd, lpfc_ncmd->nvmeCmd,
989 lpfc_ncmd->nrport);
991 /* Release the lpfc_ncmd regardless of the missing elements. */
992 lpfc_release_nvme_buf(phba, lpfc_ncmd);
993 return;
995 nCmd = lpfc_ncmd->nvmeCmd;
996 rport = lpfc_ncmd->nrport;
997 status = bf_get(lpfc_wcqe_c_status, wcqe);
999 if (vport->localport) {
1000 lport = (struct lpfc_nvme_lport *)vport->localport->private;
1001 if (lport) {
1002 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1003 cstat = &lport->cstat[idx];
1004 atomic_inc(&cstat->fc4NvmeIoCmpls);
1005 if (status) {
1006 if (bf_get(lpfc_wcqe_c_xb, wcqe))
1007 atomic_inc(&lport->cmpl_fcp_xb);
1008 atomic_inc(&lport->cmpl_fcp_err);
1013 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
1014 lpfc_ncmd->cur_iocbq.sli4_xritag,
1015 status, wcqe->parameter);
1017 * Catch race where our node has transitioned, but the
1018 * transport is still transitioning.
1020 ndlp = rport->ndlp;
1021 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1022 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
1023 "6061 rport %p, DID x%06x node not ready.\n",
1024 rport, rport->remoteport->port_id);
1026 ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
1027 if (!ndlp) {
1028 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1029 "6062 Ignoring NVME cmpl. No ndlp\n");
1030 goto out_err;
1034 code = bf_get(lpfc_wcqe_c_code, wcqe);
1035 if (code == CQE_CODE_NVME_ERSP) {
1036 /* For this type of CQE, we need to rebuild the rsp */
1037 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
1040 * Get Command Id from cmd to plug into response. This
1041 * code is not needed in the next NVME Transport drop.
1043 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
1044 cid = cp->sqe.common.command_id;
1047 * RSN is in CQE word 2
1048 * SQHD is in CQE Word 3 bits 15:0
1049 * Cmd Specific info is in CQE Word 1
1050 * and in CQE Word 0 bits 15:0
1052 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
1054 /* Now let's build the NVME ERSP IU */
1055 ep->iu_len = cpu_to_be16(8);
1056 ep->rsn = wcqe->parameter;
1057 ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
1058 ep->rsvd12 = 0;
1059 ptr = (uint32_t *)&ep->cqe.result.u64;
1060 *ptr++ = wcqe->total_data_placed;
1061 data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
1062 *ptr = (uint32_t)data;
1063 ep->cqe.sq_head = sqhd;
1064 ep->cqe.sq_id = nCmd->sqid;
1065 ep->cqe.command_id = cid;
1066 ep->cqe.status = 0;
1068 lpfc_ncmd->status = IOSTAT_SUCCESS;
1069 lpfc_ncmd->result = 0;
1070 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
1071 nCmd->transferred_length = nCmd->payload_length;
1072 } else {
1073 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
1074 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
1076 /* For NVME, the only failure path that results in an
1077 * IO error is when the adapter rejects it. All other
1078 * conditions are a success case and resolved by the
1079 * transport.
1080 * IOSTAT_FCP_RSP_ERROR means:
1081 * 1. Length of data received doesn't match total
1082 * transfer length in WQE
1083 * 2. If the RSP payload does NOT match these cases:
1084 * a. RSP length 12/24 bytes and all zeros
1085 * b. NVME ERSP
1087 switch (lpfc_ncmd->status) {
1088 case IOSTAT_SUCCESS:
1089 nCmd->transferred_length = wcqe->total_data_placed;
1090 nCmd->rcv_rsplen = 0;
1091 nCmd->status = 0;
1092 break;
1093 case IOSTAT_FCP_RSP_ERROR:
1094 nCmd->transferred_length = wcqe->total_data_placed;
1095 nCmd->rcv_rsplen = wcqe->parameter;
1096 nCmd->status = 0;
1097 /* Sanity check */
1098 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
1099 break;
1100 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1101 "6081 NVME Completion Protocol Error: "
1102 "xri %x status x%x result x%x "
1103 "placed x%x\n",
1104 lpfc_ncmd->cur_iocbq.sli4_xritag,
1105 lpfc_ncmd->status, lpfc_ncmd->result,
1106 wcqe->total_data_placed);
1107 break;
1108 case IOSTAT_LOCAL_REJECT:
1109 /* Let it fall through to set the command's final state. */
1110 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
1111 lpfc_printf_vlog(vport, KERN_INFO,
1112 LOG_NVME_IOERR,
1113 "6032 Delay Aborted cmd %p "
1114 "nvme cmd %p, xri x%x, "
1115 "xb %d\n",
1116 lpfc_ncmd, nCmd,
1117 lpfc_ncmd->cur_iocbq.sli4_xritag,
1118 bf_get(lpfc_wcqe_c_xb, wcqe));
1119 default:
1120 out_err:
1121 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1122 "6072 NVME Completion Error: xri %x "
1123 "status x%x result x%x placed x%x\n",
1124 lpfc_ncmd->cur_iocbq.sli4_xritag,
1125 lpfc_ncmd->status, lpfc_ncmd->result,
1126 wcqe->total_data_placed);
1127 nCmd->transferred_length = 0;
1128 nCmd->rcv_rsplen = 0;
1129 nCmd->status = NVME_SC_INTERNAL;
1133 /* pick up SLI4 exchange busy condition */
1134 if (bf_get(lpfc_wcqe_c_xb, wcqe))
1135 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1136 else
1137 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1139 /* Update stats and complete the IO. There is
1140 * no need for dma unprep because the nvme_transport
1141 * owns the dma address.
1143 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1144 if (lpfc_ncmd->ts_cmd_start) {
1145 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
1146 lpfc_ncmd->ts_data_nvme = ktime_get_ns();
1147 phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
1148 lpfc_nvme_ktime(phba, lpfc_ncmd);
1150 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1151 if (lpfc_ncmd->cpu != smp_processor_id())
1152 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1153 "6701 CPU Check cmpl: "
1154 "cpu %d expect %d\n",
1155 smp_processor_id(), lpfc_ncmd->cpu);
1156 if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
1157 phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
1159 #endif
1161 /* NVME targets need completion held off until the abort exchange
1162 * completes unless the NVME Rport is getting unregistered.
1165 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1166 freqpriv = nCmd->private;
1167 freqpriv->nvme_buf = NULL;
1168 nCmd->done(nCmd);
1169 lpfc_ncmd->nvmeCmd = NULL;
1172 spin_lock_irqsave(&phba->hbalock, flags);
1173 lpfc_ncmd->nrport = NULL;
1174 spin_unlock_irqrestore(&phba->hbalock, flags);
1176 /* Call release with XB=1 to queue the IO into the abort list. */
1177 lpfc_release_nvme_buf(phba, lpfc_ncmd);
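/*
 * Completion ordering sketch: when the WCQE reports XB (exchange still
 * busy in the port), nCmd->done() is deferred - the buffer is released
 * with LPFC_SBUF_XBUSY set so it parks on the abort list, and the
 * transport completion is finished once the port signals the exchange
 * aborted (handled elsewhere in the driver). Without XB, done() runs
 * here and the buffer recycles immediately.
 */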
1182 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
1183 * @vport: Pointer to the lpfc vport issuing this IO
1184 * @lpfc_ncmd: Pointer to the driver's per-IO nvme buffer
1185 * @pnode: Pointer to the node (rport) this IO is destined for
1186 * @cstat: Pointer to the per-queue controller statistics
1189 * This routine initializes the command WQE from the IREAD, IWRITE or
1190 * ICMND template and from the nvme request carried in @lpfc_ncmd
1191 * before the IO is posted to the work queue.
1193 * Return value :
1194 * 0 - Success
1195 * -EINVAL - node is invalid or inactive
1197 static int
1198 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1199 struct lpfc_nvme_buf *lpfc_ncmd,
1200 struct lpfc_nodelist *pnode,
1201 struct lpfc_nvme_ctrl_stat *cstat)
1203 struct lpfc_hba *phba = vport->phba;
1204 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1205 struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
1206 union lpfc_wqe128 *wqe = &pwqeq->wqe;
1207 uint32_t req_len;
1209 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1210 return -EINVAL;
1213 * There are three possibilities here - use scatter-gather segment, use
1214 * the single mapping, or neither.
1216 if (nCmd->sg_cnt) {
1217 if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
1218 /* From the iwrite template, initialize words 7 - 11 */
1219 memcpy(&wqe->words[7],
1220 &lpfc_iwrite_cmd_template.words[7],
1221 sizeof(uint32_t) * 5);
1223 /* Word 4 */
1224 wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
1226 /* Word 5 */
1227 if ((phba->cfg_nvme_enable_fb) &&
1228 (pnode->nlp_flag & NLP_FIRSTBURST)) {
1229 req_len = lpfc_ncmd->nvmeCmd->payload_length;
1230 if (req_len < pnode->nvme_fb_size)
1231 wqe->fcp_iwrite.initial_xfer_len =
1232 req_len;
1233 else
1234 wqe->fcp_iwrite.initial_xfer_len =
1235 pnode->nvme_fb_size;
1236 } else {
1237 wqe->fcp_iwrite.initial_xfer_len = 0;
1239 atomic_inc(&cstat->fc4NvmeOutputRequests);
1240 } else {
1241 /* From the iread template, initialize words 7 - 11 */
1242 memcpy(&wqe->words[7],
1243 &lpfc_iread_cmd_template.words[7],
1244 sizeof(uint32_t) * 5);
1246 /* Word 4 */
1247 wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
1249 /* Word 5 */
1250 wqe->fcp_iread.rsrvd5 = 0;
1252 atomic_inc(&cstat->fc4NvmeInputRequests);
1254 } else {
1255 /* From the icmnd template, initialize words 4 - 11 */
1256 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
1257 sizeof(uint32_t) * 8);
1258 atomic_inc(&cstat->fc4NvmeControlRequests);
1261 * Finish initializing those WQE fields that are independent
1262 * of the nvme_cmnd request_buffer
1265 /* Word 3 */
1266 bf_set(payload_offset_len, &wqe->fcp_icmd,
1267 (nCmd->rsplen + nCmd->cmdlen));
1269 /* Word 6 */
1270 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
1271 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
1272 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
1274 /* Word 8 */
1275 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
1277 /* Word 9 */
1278 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
1280 /* Words 13 14 15 are for PBDE support */
1282 pwqeq->vport = vport;
1283 return 0;
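/*
 * First-burst example for the Word 5 logic above: with
 * cfg_nvme_enable_fb set and NLP_FIRSTBURST negotiated, a 16K write
 * against an nvme_fb_size of 64K gets initial_xfer_len = 16K (the
 * whole payload); a 128K write is capped at 64K. Without first burst,
 * initial_xfer_len stays 0. Equivalently:
 *
 *	initial_xfer_len = min(payload_length, pnode->nvme_fb_size);
 */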
1288 * lpfc_nvme_prep_io_dma - Map DMA resources for an NVME-over-FCP IO
1289 * @vport: Pointer to the lpfc vport issuing this IO
1290 * @lpfc_ncmd: Pointer to the driver's per-IO nvme buffer
1292 * This routine sets up the command and response SGEs and maps the
1293 * data scatter-gather list from the nvme request carried in
1294 * @lpfc_ncmd before the WQE is posted to the work queue.
1296 * Return value :
1297 * 0 - Success
1298 * 1 - Failure (invalid scatter-gather list)
1303 static int
1304 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1305 struct lpfc_nvme_buf *lpfc_ncmd)
1307 struct lpfc_hba *phba = vport->phba;
1308 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1309 union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1310 struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
1311 struct scatterlist *data_sg;
1312 struct sli4_sge *first_data_sgl;
1313 struct ulp_bde64 *bde;
1314 dma_addr_t physaddr;
1315 uint32_t num_bde = 0;
1316 uint32_t dma_len;
1317 uint32_t dma_offset = 0;
1318 int nseg, i;
1320 /* Fix up the command and response DMA stuff. */
1321 lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1324 * There are three possibilities here - use scatter-gather segment, use
1325 * the single mapping, or neither.
1327 if (nCmd->sg_cnt) {
1329 * Jump over the cmd and rsp SGEs. The fix routine
1330 * has already adjusted for this.
1332 sgl += 2;
1334 first_data_sgl = sgl;
1335 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1336 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1337 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1338 "6058 Too many sg segments from "
1339 "NVME Transport. Max %d, "
1340 "nvmeIO sg_cnt %d\n",
1341 phba->cfg_nvme_seg_cnt + 1,
1342 lpfc_ncmd->seg_cnt);
1343 lpfc_ncmd->seg_cnt = 0;
1344 return 1;
1348 * The driver established a maximum scatter-gather segment count
1349 * during probe that limits the number of sg elements in any
1350 * single nvme command. Just run through the seg_cnt and format
1351 * the sge's.
1353 nseg = nCmd->sg_cnt;
1354 data_sg = nCmd->first_sgl;
1355 for (i = 0; i < nseg; i++) {
1356 if (data_sg == NULL) {
1357 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1358 "6059 dptr err %d, nseg %d\n",
1359 i, nseg);
1360 lpfc_ncmd->seg_cnt = 0;
1361 return 1;
1363 physaddr = data_sg->dma_address;
1364 dma_len = data_sg->length;
1365 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1366 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1367 sgl->word2 = le32_to_cpu(sgl->word2);
1368 if ((num_bde + 1) == nseg)
1369 bf_set(lpfc_sli4_sge_last, sgl, 1);
1370 else
1371 bf_set(lpfc_sli4_sge_last, sgl, 0);
1372 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1373 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1374 sgl->word2 = cpu_to_le32(sgl->word2);
1375 sgl->sge_len = cpu_to_le32(dma_len);
1377 dma_offset += dma_len;
1378 data_sg = sg_next(data_sg);
1379 sgl++;
1381 if (phba->cfg_enable_pbde) {
1382 /* Use PBDE support for first SGL only, offset == 0 */
1383 /* Words 13-15 */
1384 bde = (struct ulp_bde64 *)
1385 &wqe->words[13];
1386 bde->addrLow = first_data_sgl->addr_lo;
1387 bde->addrHigh = first_data_sgl->addr_hi;
1388 bde->tus.f.bdeSize =
1389 le32_to_cpu(first_data_sgl->sge_len);
1390 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1391 bde->tus.w = cpu_to_le32(bde->tus.w);
1392 /* wqe_pbde is 1 in template */
1393 } else {
1394 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1395 bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1398 } else {
1399 /* For this clause to be valid, the payload_length
1400 * and sg_cnt must be zero.
1402 if (nCmd->payload_length != 0) {
1403 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1404 "6063 NVME DMA Prep Err: sg_cnt %d "
1405 "payload_length x%x\n",
1406 nCmd->sg_cnt, nCmd->payload_length);
1407 return 1;
1410 return 0;
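/*
 * PBDE note for the words 13-15 logic above: with cfg_enable_pbde the
 * first data SGE is duplicated into the WQE as a 64-bit BDE, letting
 * the adapter start on the first data buffer without first fetching
 * the SGL; otherwise those words are zeroed and wqe_pbde is cleared
 * (the IREAD/IWRITE templates default it to 1).
 */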
1414 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1415 * @lpfc_pnvme: Pointer to the driver's nvme instance data
1416 * @lpfc_nvme_lport: Pointer to the driver's local port data
1417 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
1418 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
1419 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1421 * Driver registers this routine as its io request handler. This
1422 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
1423 * data structure to the rport indicated in @lpfc_nvme_rport.
1426 * Return value :
1427 * 0 - Success
1428 * -EINVAL, -EBUSY, -ENODEV or -ENOMEM - Failure
1430 static int
1431 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1432 struct nvme_fc_remote_port *pnvme_rport,
1433 void *hw_queue_handle,
1434 struct nvmefc_fcp_req *pnvme_fcreq)
1436 int ret = 0;
1437 int expedite = 0;
1438 int idx;
1439 struct lpfc_nvme_lport *lport;
1440 struct lpfc_nvme_ctrl_stat *cstat;
1441 struct lpfc_vport *vport;
1442 struct lpfc_hba *phba;
1443 struct lpfc_nodelist *ndlp;
1444 struct lpfc_nvme_buf *lpfc_ncmd;
1445 struct lpfc_nvme_rport *rport;
1446 struct lpfc_nvme_qhandle *lpfc_queue_info;
1447 struct lpfc_nvme_fcpreq_priv *freqpriv;
1448 struct nvme_common_command *sqe;
1449 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1450 uint64_t start = 0;
1451 #endif
1453 /* Validate pointers. LLDD fault handling with transport does
1454 * have timing races.
1456 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1457 if (unlikely(!lport)) {
1458 ret = -EINVAL;
1459 goto out_fail;
1462 vport = lport->vport;
1464 if (unlikely(!hw_queue_handle)) {
1465 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1466 "6117 Fail IO, NULL hw_queue_handle\n");
1467 atomic_inc(&lport->xmt_fcp_err);
1468 ret = -EBUSY;
1469 goto out_fail;
1472 phba = vport->phba;
1479 if (vport->load_flag & FC_UNLOADING) {
1480 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1481 "6124 Fail IO, Driver unload\n");
1482 atomic_inc(&lport->xmt_fcp_err);
1483 ret = -ENODEV;
1484 goto out_fail;
1487 freqpriv = pnvme_fcreq->private;
1488 if (unlikely(!freqpriv)) {
1489 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1490 "6158 Fail IO, NULL request data\n");
1491 atomic_inc(&lport->xmt_fcp_err);
1492 ret = -EINVAL;
1493 goto out_fail;
1496 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1497 if (phba->ktime_on)
1498 start = ktime_get_ns();
1499 #endif
1500 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1501 lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1504 * Catch race where our node has transitioned, but the
1505 * transport is still transitioning.
1507 ndlp = rport->ndlp;
1508 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1509 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1510 "6053 Fail IO, ndlp not ready: rport %p "
1511 "ndlp %p, DID x%06x\n",
1512 rport, ndlp, pnvme_rport->port_id);
1513 atomic_inc(&lport->xmt_fcp_err);
1514 ret = -EBUSY;
1515 goto out_fail;
1518 /* The remote node has to be a mapped target or it's an error. */
1519 if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1520 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1521 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1522 "6036 Fail IO, DID x%06x not ready for "
1523 "IO. State x%x, Type x%x Flg x%x\n",
1524 pnvme_rport->port_id,
1525 ndlp->nlp_state, ndlp->nlp_type,
1526 ndlp->upcall_flags);
1527 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1528 ret = -EBUSY;
1529 goto out_fail;
1533 /* Currently only NVME Keep alive commands should be expedited
1534 * if the driver runs out of a resource. These should only be
1535 * issued on the admin queue, qidx 0
1537 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1538 sqe = &((struct nvme_fc_cmd_iu *)
1539 pnvme_fcreq->cmdaddr)->sqe.common;
1540 if (sqe->opcode == nvme_admin_keep_alive)
1541 expedite = 1;
1544 /* The node is shared with FCP IO, make sure the IO pending count does
1545 * not exceed the programmed depth.
1547 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1548 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1549 !expedite) {
1550 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1551 "6174 Fail IO, ndlp qdepth exceeded: "
1552 "idx %d DID %x pend %d qdepth %d\n",
1553 lpfc_queue_info->index, ndlp->nlp_DID,
1554 atomic_read(&ndlp->cmd_pending),
1555 ndlp->cmd_qdepth);
1556 atomic_inc(&lport->xmt_fcp_qdepth);
1557 ret = -EBUSY;
1558 goto out_fail;
1562 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
1563 if (lpfc_ncmd == NULL) {
1564 atomic_inc(&lport->xmt_fcp_noxri);
1565 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1566 "6065 Fail IO, driver buffer pool is empty: "
1567 "idx %d DID %x\n",
1568 lpfc_queue_info->index, ndlp->nlp_DID);
1569 ret = -EBUSY;
1570 goto out_fail;
1572 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1573 if (start) {
1574 lpfc_ncmd->ts_cmd_start = start;
1575 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1576 } else {
1577 lpfc_ncmd->ts_cmd_start = 0;
1579 #endif
1582 * Store the data needed by the driver to issue, abort, and complete
1583 * an IO.
1584 * Do not let the IO hang out forever. There is no midlayer issuing
1585 * an abort so inform the FW of the maximum IO pending time.
1587 freqpriv->nvme_buf = lpfc_ncmd;
1588 lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1589 lpfc_ncmd->nrport = rport;
1590 lpfc_ncmd->ndlp = ndlp;
1591 lpfc_ncmd->start_time = jiffies;
1594 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1595 * This identifier was created in our hardware queue create callback
1596 * routine. The driver now is dependent on the IO queue steering from
1597 * the transport. We are trusting the upper NVME layers know which
1598 * index to use and that they have affinitized a CPU to this hardware
1599 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1601 idx = lpfc_queue_info->index;
1602 lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1603 cstat = &lport->cstat[idx];
1605 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1606 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1607 if (ret) {
1608 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1609 "6175 Fail IO, Prep DMA: "
1610 "idx %d DID %x\n",
1611 lpfc_queue_info->index, ndlp->nlp_DID);
1612 atomic_inc(&lport->xmt_fcp_err);
1613 ret = -ENOMEM;
1614 goto out_free_nvme_buf;
1617 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1618 lpfc_ncmd->cur_iocbq.sli4_xritag,
1619 lpfc_queue_info->index, ndlp->nlp_DID);
1621 ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
1622 if (ret) {
1623 atomic_inc(&lport->xmt_fcp_wqerr);
1624 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1625 "6113 Fail IO, Could not issue WQE err %x "
1626 "sid: x%x did: x%x oxid: x%x\n",
1627 ret, vport->fc_myDID, ndlp->nlp_DID,
1628 lpfc_ncmd->cur_iocbq.sli4_xritag);
1629 goto out_free_nvme_buf;
1632 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1633 if (lpfc_ncmd->ts_cmd_start)
1634 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1636 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1637 lpfc_ncmd->cpu = smp_processor_id();
1638 if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
1639 /* Check for admin queue */
1640 if (lpfc_queue_info->qidx) {
1641 lpfc_printf_vlog(vport,
1642 KERN_ERR, LOG_NVME_IOERR,
1643 "6702 CPU Check cmd: "
1644 "cpu %d wq %d\n",
1645 lpfc_ncmd->cpu,
1646 lpfc_queue_info->index);
1648 lpfc_ncmd->cpu = lpfc_queue_info->index;
1650 if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
1651 phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
1653 #endif
1654 return 0;
1656 out_free_nvme_buf:
1657 if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1658 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1659 atomic_dec(&cstat->fc4NvmeOutputRequests);
1660 else
1661 atomic_dec(&cstat->fc4NvmeInputRequests);
1662 } else
1663 atomic_dec(&cstat->fc4NvmeControlRequests);
1664 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1665 out_fail:
1666 return ret;
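/*
 * Submit path summary: validate lport/hw_queue_handle/freqpriv ->
 * check ndlp state and qdepth -> lpfc_get_nvme_buf() ->
 * lpfc_nvme_prep_io_cmd() -> lpfc_nvme_prep_io_dma() ->
 * lpfc_sli4_issue_wqe(LPFC_FCP_RING). Any failure after the buffer is
 * allocated unwinds the per-queue IO counters and releases the buffer
 * before returning -EBUSY/-EINVAL/-ENODEV/-ENOMEM to the transport.
 */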
1670 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1671 * @phba: Pointer to HBA context object
1672 * @cmdiocb: Pointer to command iocb object.
1673 * @abts_cmpl: Pointer to the abort completion WCQE.
1675 * This is the callback function for any NVME FCP IO that was aborted.
1677 * Return value:
1678 * None
1680 void
1681 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1682 struct lpfc_wcqe_complete *abts_cmpl)
1684 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1685 "6145 ABORT_XRI_CN completing on rpi x%x "
1686 "original iotag x%x, abort cmd iotag x%x "
1687 "req_tag x%x, status x%x, hwstatus x%x\n",
1688 cmdiocb->iocb.un.acxri.abortContextTag,
1689 cmdiocb->iocb.un.acxri.abortIoTag,
1690 cmdiocb->iotag,
1691 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1692 bf_get(lpfc_wcqe_c_status, abts_cmpl),
1693 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1694 lpfc_sli_release_iocbq(phba, cmdiocb);
1698 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1699 * @lpfc_pnvme: Pointer to the driver's nvme instance data
1700 * @lpfc_nvme_lport: Pointer to the driver's local port data
1701 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
1702 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
1703 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1705 * Driver registers this routine as its nvme request io abort handler. This
1706 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
1707 * data structure to the rport indicated in @lpfc_nvme_rport. This routine
1708 * is executed asynchronously - once the target is validated as "MAPPED" and
1709 * ready for IO, the driver issues the abort request and returns.
1711 * Return value:
1712 * None
1714 static void
1715 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1716 struct nvme_fc_remote_port *pnvme_rport,
1717 void *hw_queue_handle,
1718 struct nvmefc_fcp_req *pnvme_fcreq)
1720 struct lpfc_nvme_lport *lport;
1721 struct lpfc_vport *vport;
1722 struct lpfc_hba *phba;
1723 struct lpfc_nvme_buf *lpfc_nbuf;
1724 struct lpfc_iocbq *abts_buf;
1725 struct lpfc_iocbq *nvmereq_wqe;
1726 struct lpfc_nvme_fcpreq_priv *freqpriv;
1727 union lpfc_wqe128 *abts_wqe;
1728 unsigned long flags;
1729 int ret_val;
1731 /* Validate pointers. LLDD fault handling with transport does
1732 * have timing races.
1734 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1735 if (unlikely(!lport))
1736 return;
1738 vport = lport->vport;
1740 if (unlikely(!hw_queue_handle)) {
1741 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1742 "6129 Fail Abort, HW Queue Handle NULL.\n");
1743 return;
1746 phba = vport->phba;
1747 freqpriv = pnvme_fcreq->private;
1749 if (unlikely(!freqpriv))
1750 return;
1751 if (vport->load_flag & FC_UNLOADING)
1752 return;
1754 /* Announce entry to the abort request handler. */
1755 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1756 "6002 Abort Request to rport DID x%06x "
1757 "for nvme_fc_req %p\n",
1758 pnvme_rport->port_id,
1759 pnvme_fcreq);
1761 /* If the hba is getting reset, this flag is set. It is
1762 * cleared when the reset is complete and rings reestablished.
1764 spin_lock_irqsave(&phba->hbalock, flags);
1765 /* driver queued commands are in process of being flushed */
1766 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
1767 spin_unlock_irqrestore(&phba->hbalock, flags);
1768 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1769 "6139 Driver in reset cleanup - flushing "
1770 "NVME Req now. hba_flag x%x\n",
1771 phba->hba_flag);
1772 return;
1775 lpfc_nbuf = freqpriv->nvme_buf;
1776 if (!lpfc_nbuf) {
1777 spin_unlock_irqrestore(&phba->hbalock, flags);
1778 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1779 "6140 NVME IO req has no matching lpfc nvme "
1780 "io buffer. Skipping abort req.\n");
1781 return;
1782 } else if (!lpfc_nbuf->nvmeCmd) {
1783 spin_unlock_irqrestore(&phba->hbalock, flags);
1784 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1785 "6141 lpfc NVME IO req has no nvme_fcreq "
1786 "io buffer. Skipping abort req.\n");
1787 return;
1788 }
1789 nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1791 /*
1792 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1793 * state must match the nvme_fcreq passed by the nvme
1794 * transport. If they don't match, it is likely the driver
1795 * has already completed the NVME IO and the nvme transport
1796 * has not seen it yet.
1797 */
1798 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1799 spin_unlock_irqrestore(&phba->hbalock, flags);
1800 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1801 "6143 NVME req mismatch: "
1802 "lpfc_nbuf %p nvmeCmd %p, "
1803 "pnvme_fcreq %p. Skipping Abort xri x%x\n",
1804 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1805 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1806 return;
1807 }
1809 /* Don't abort IOs no longer on the pending queue. */
1810 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1811 spin_unlock_irqrestore(&phba->hbalock, flags);
1812 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1813 "6142 NVME IO req %p not queued - skipping "
1814 "abort req xri x%x\n",
1815 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1816 return;
1817 }
1819 atomic_inc(&lport->xmt_fcp_abort);
1820 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1821 nvmereq_wqe->sli4_xritag,
1822 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1824 /* Outstanding abort is in progress */
1825 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1826 spin_unlock_irqrestore(&phba->hbalock, flags);
1827 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1828 "6144 Outstanding NVME I/O Abort Request "
1829 "still pending on nvme_fcreq %p, "
1830 "lpfc_ncmd %p xri x%x\n",
1831 pnvme_fcreq, lpfc_nbuf,
1832 nvmereq_wqe->sli4_xritag);
1833 return;
1834 }
1836 abts_buf = __lpfc_sli_get_iocbq(phba);
1837 if (!abts_buf) {
1838 spin_unlock_irqrestore(&phba->hbalock, flags);
1839 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1840 "6136 No available abort wqes. Skipping "
1841 "Abts req for nvme_fcreq %p xri x%x\n",
1842 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1843 return;
1844 }
1846 /* Ready - mark outstanding as aborted by driver. */
1847 nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
1849 /* Complete prepping the abort wqe and issue to the FW. */
1850 abts_wqe = &abts_buf->wqe;
1852 /* WQEs are reused. Clear stale data and set key fields to
1853 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1854 */
1855 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
1856 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1858 /* word 7 */
1859 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1860 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
1861 nvmereq_wqe->iocb.ulpClass);
1863 /* word 8 - tell the FW to abort the IO associated with this
1864 * outstanding exchange ID.
1865 */
1866 abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
1868 /* word 9 - this is the iotag for the abts_wqe completion. */
1869 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1870 abts_buf->iotag);
1872 /* word 10 */
1873 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1874 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1876 /* word 11 */
1877 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1878 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1879 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1881 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1882 abts_buf->iocb_flag |= LPFC_IO_NVME;
1883 abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
1884 abts_buf->vport = vport;
1885 abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1886 ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
1887 spin_unlock_irqrestore(&phba->hbalock, flags);
1888 if (ret_val) {
1889 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1890 "6137 Failed abts issue_wqe with status x%x "
1891 "for nvme_fcreq %p.\n",
1892 ret_val, pnvme_fcreq);
1893 lpfc_sli_release_iocbq(phba, abts_buf);
1894 return;
1895 }
1897 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1898 "6138 Transport Abort NVME Request Issued for "
1899 "ox_id x%x on reqtag x%x\n",
1900 nvmereq_wqe->sli4_xritag,
1901 abts_buf->iotag);
1902 }
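/*
 * Illustrative sketch (hypothetical names, not driver code): the abort
 * handler above takes one lock, runs a ladder of cheap validations that
 * each unlock-and-return on failure, and only then marks the IO and
 * issues the ABTS. Roughly:
 */
#if 0	/* illustrative only, not compiled */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!io_still_outstanding(io)) {	/* hypothetical check */
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;				/* nothing left to abort */
	}
	io->flags |= DRIVER_ABORTED;		/* mark before issuing ABTS */
	ret = issue_abort_wqe(phba, io);	/* hypothetical issue step */
	spin_unlock_irqrestore(&phba->hbalock, flags);
#endif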
1904 /* Declaration and initialization of an instance of the FC NVME template. */
1905 static struct nvme_fc_port_template lpfc_nvme_template = {
1906 /* initiator-based functions */
1907 .localport_delete = lpfc_nvme_localport_delete,
1908 .remoteport_delete = lpfc_nvme_remoteport_delete,
1909 .create_queue = lpfc_nvme_create_queue,
1910 .delete_queue = lpfc_nvme_delete_queue,
1911 .ls_req = lpfc_nvme_ls_req,
1912 .fcp_io = lpfc_nvme_fcp_io_submit,
1913 .ls_abort = lpfc_nvme_ls_abort,
1914 .fcp_abort = lpfc_nvme_fcp_abort,
1916 .max_hw_queues = 1,
1917 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1918 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1919 .dma_boundary = 0xFFFFFFFF,
1921 /* Sizes of additional private data for data structures.
1922 * No use for the last two sizes at this time.
1923 */
1924 .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1925 .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1926 .lsrqst_priv_sz = 0,
1927 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
1928 };
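/*
 * Illustrative sketch of how a template like the one above is consumed
 * (hypothetical locals; the driver's real call site is in
 * lpfc_nvme_create_localport below): the transport copies the template
 * at registration time and allocates local_priv_sz bytes of
 * lport-private data behind the returned localport.
 */
#if 0	/* illustrative only, not compiled */
	struct nvme_fc_port_info info;
	struct nvme_fc_local_port *lp;
	int rc;

	memset(&info, 0, sizeof(info));
	info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	rc = nvme_fc_register_localport(&info, &lpfc_nvme_template,
					&phba->pcidev->dev, &lp);
	if (!rc)
		lport = lp->private;	/* local_priv_sz bytes */
#endif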
1930 /**
1931 * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
1932 * @phba: pointer to lpfc hba data structure.
1933 * @nblist: pointer to nvme buffer list.
1934 * @count: number of nvme buffers on the list.
1936 * This routine is invoked to post a block of @count nvme sgl pages from an
1937 * NVME buffer list @nblist to the HBA using a non-embedded mailbox command.
1938 * No Lock is held.
1940 */
1941 static int
1942 lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
1943 struct list_head *nblist,
1944 int count)
1945 {
1946 struct lpfc_nvme_buf *lpfc_ncmd;
1947 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
1948 struct sgl_page_pairs *sgl_pg_pairs;
1949 void *viraddr;
1950 LPFC_MBOXQ_t *mbox;
1951 uint32_t reqlen, alloclen, pg_pairs;
1952 uint32_t mbox_tmo;
1953 uint16_t xritag_start = 0;
1954 int rc = 0;
1955 uint32_t shdr_status, shdr_add_status;
1956 dma_addr_t pdma_phys_bpl1;
1957 union lpfc_sli4_cfg_shdr *shdr;
1959 /* Calculate the requested length of the dma memory */
1960 reqlen = count * sizeof(struct sgl_page_pairs) +
1961 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
1962 if (reqlen > SLI4_PAGE_SIZE) {
1963 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1964 "6118 Block sgl registration required DMA "
1965 "size (%d) great than a page\n", reqlen);
1966 return -ENOMEM;
1967 }
1968 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1969 if (!mbox) {
1970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1971 "6119 Failed to allocate mbox cmd memory\n");
1972 return -ENOMEM;
1973 }
1975 /* Allocate DMA memory and set up the non-embedded mailbox command */
1976 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1977 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
1978 LPFC_SLI4_MBX_NEMBED);
1980 if (alloclen < reqlen) {
1981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1982 "6120 Allocated DMA memory size (%d) is "
1983 "less than the requested DMA memory "
1984 "size (%d)\n", alloclen, reqlen);
1985 lpfc_sli4_mbox_cmd_free(phba, mbox);
1986 return -ENOMEM;
1987 }
1989 /* Get the first SGE entry from the non-embedded DMA memory */
1990 viraddr = mbox->sge_array->addr[0];
1992 /* Set up the SGL pages in the non-embedded DMA pages */
1993 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
1994 sgl_pg_pairs = &sgl->sgl_pg_pairs;
1996 pg_pairs = 0;
1997 list_for_each_entry(lpfc_ncmd, nblist, list) {
1998 /* Set up the sge entry */
1999 sgl_pg_pairs->sgl_pg0_addr_lo =
2000 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
2001 sgl_pg_pairs->sgl_pg0_addr_hi =
2002 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
2003 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
2004 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
2005 SGL_PAGE_SIZE;
2006 else
2007 pdma_phys_bpl1 = 0;
2008 sgl_pg_pairs->sgl_pg1_addr_lo =
2009 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
2010 sgl_pg_pairs->sgl_pg1_addr_hi =
2011 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
2012 /* Keep the first xritag on the list */
2013 if (pg_pairs == 0)
2014 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
2015 sgl_pg_pairs++;
2016 pg_pairs++;
2017 }
2018 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
2019 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
2020 /* Perform endian conversion if necessary */
2021 sgl->word0 = cpu_to_le32(sgl->word0);
2023 if (!phba->sli4_hba.intr_enable)
2024 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
2025 else {
2026 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
2027 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
2028 }
2029 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
2030 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2031 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2032 if (rc != MBX_TIMEOUT)
2033 lpfc_sli4_mbox_cmd_free(phba, mbox);
2034 if (shdr_status || shdr_add_status || rc) {
2035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2036 "6125 POST_SGL_BLOCK mailbox command failed "
2037 "status x%x add_status x%x mbx status x%x\n",
2038 shdr_status, shdr_add_status, rc);
2039 rc = -ENXIO;
2040 }
2041 return rc;
2042 }
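/*
 * Worked sizing example for the check in the routine above (the exact
 * SLI4 page size is an assumption for illustration): the non-embedded
 * payload
 *
 *     reqlen = count * sizeof(struct sgl_page_pairs)
 *            + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t)
 *
 * must fit in one SLI4 page, which bounds how many sgl page pairs a
 * single POST_SGL_PAGES mailbox command can carry; the caller keeps
 * blocks at or below LPFC_NEMBED_MBOX_SGL_CNT to respect this.
 */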
2044 /**
2045 * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
2046 * @phba: pointer to lpfc hba data structure.
2047 * @post_nblist: pointer to the nvme buffer list.
2049 * This routine walks a list of nvme buffers that was passed in. It attempts
2050 * to construct blocks of nvme buffer sgls which contain contiguous xris and
2051 * uses the non-embedded SGL block post mailbox commands to post to the port.
2052 * For a single NVME buffer sgl with non-contiguous xri, if any, it shall use
2053 * the embedded SGL post mailbox command for posting. The @post_nblist passed
2054 * in must be a local list, thus no lock is needed when manipulating the list.
2056 * Returns: 0 = failure, non-zero number of successfully posted buffers.
2057 */
2058 static int
2059 lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
2060 struct list_head *post_nblist, int sb_count)
2061 {
2062 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2063 int status, sgl_size;
2064 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
2065 dma_addr_t pdma_phys_sgl1;
2066 int last_xritag = NO_XRI;
2067 int cur_xritag;
2068 LIST_HEAD(prep_nblist);
2069 LIST_HEAD(blck_nblist);
2070 LIST_HEAD(nvme_nblist);
2072 /* sanity check */
2073 if (sb_count <= 0)
2074 return -EINVAL;
2076 sgl_size = phba->cfg_sg_dma_buf_size;
2078 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
2079 list_del_init(&lpfc_ncmd->list);
2080 block_cnt++;
2081 if ((last_xritag != NO_XRI) &&
2082 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
2083 /* a hole in xri block, form a sgl posting block */
2084 list_splice_init(&prep_nblist, &blck_nblist);
2085 post_cnt = block_cnt - 1;
2086 /* prepare list for next posting block */
2087 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
2088 block_cnt = 1;
2089 } else {
2090 /* prepare list for next posting block */
2091 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
2092 /* enough sgls for non-embed sgl mbox command */
2093 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
2094 list_splice_init(&prep_nblist, &blck_nblist);
2095 post_cnt = block_cnt;
2096 block_cnt = 0;
2097 }
2098 }
2099 num_posting++;
2100 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
2102 /* end of repost sgl list condition for NVME buffers */
2103 if (num_posting == sb_count) {
2104 if (post_cnt == 0) {
2105 /* last sgl posting block */
2106 list_splice_init(&prep_nblist, &blck_nblist);
2107 post_cnt = block_cnt;
2108 } else if (block_cnt == 1) {
2109 /* last single sgl with non-contiguous xri */
2110 if (sgl_size > SGL_PAGE_SIZE)
2111 pdma_phys_sgl1 =
2112 lpfc_ncmd->dma_phys_sgl +
2113 SGL_PAGE_SIZE;
2114 else
2115 pdma_phys_sgl1 = 0;
2116 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
2117 status = lpfc_sli4_post_sgl(phba,
2118 lpfc_ncmd->dma_phys_sgl,
2119 pdma_phys_sgl1, cur_xritag);
2120 if (status) {
2121 /* failure, put on abort nvme list */
2122 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
2123 } else {
2124 /* success, put on NVME buffer list */
2125 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2126 lpfc_ncmd->status = IOSTAT_SUCCESS;
2127 num_posted++;
2128 }
2129 /* success, put on NVME buffer sgl list */
2130 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
2131 }
2132 }
2134 /* continue until a nembed page worth of sgls */
2135 if (post_cnt == 0)
2136 continue;
2138 /* post block of NVME buffer list sgls */
2139 status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
2140 post_cnt);
2142 /* don't reset xritag due to hole in xri block */
2143 if (block_cnt == 0)
2144 last_xritag = NO_XRI;
2146 /* reset NVME buffer post count for next round of posting */
2147 post_cnt = 0;
2149 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
2150 while (!list_empty(&blck_nblist)) {
2151 list_remove_head(&blck_nblist, lpfc_ncmd,
2152 struct lpfc_nvme_buf, list);
2153 if (status) {
2154 /* failure, put on abort nvme list */
2155 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
2156 } else {
2157 /* success, put on NVME buffer list */
2158 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2159 lpfc_ncmd->status = IOSTAT_SUCCESS;
2160 num_posted++;
2161 }
2162 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
2163 }
2164 }
2165 /* Push NVME buffers with sgl posted to the available list */
2166 while (!list_empty(&nvme_nblist)) {
2167 list_remove_head(&nvme_nblist, lpfc_ncmd,
2168 struct lpfc_nvme_buf, list);
2169 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2170 }
2171 return num_posted;
2172 }
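/*
 * Illustrative reduction of the XRI-blocking logic above to a plain
 * array of tags (post_in_blocks() and post_block() are hypothetical,
 * not driver code): runs of consecutive tags are flushed as one block;
 * a hole flushes the run collected so far and starts a new one.
 */
#if 0	/* illustrative only, not compiled */
static void post_in_blocks(const int *tag, int n)
{
	int start = 0, i;

	for (i = 1; i <= n; i++) {
		/* flush on a hole in the sequence or at the end */
		if (i == n || tag[i] != tag[i - 1] + 1) {
			post_block(&tag[start], i - start);
			start = i;
		}
	}
}
#endif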
2174 /**
2175 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
2176 * @phba: pointer to lpfc hba data structure.
2178 * This routine walks the list of nvme buffers that have been allocated and
2179 * reposts them to the port by using SGL block post. This is needed after a
2180 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
2181 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
2182 * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
2184 * Returns: 0 = success, non-zero failure.
2185 */
2186 int
2187 lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
2188 {
2189 LIST_HEAD(post_nblist);
2190 int num_posted, rc = 0;
2192 /* get all NVME buffers need to repost to a local list */
2193 spin_lock_irq(&phba->nvme_buf_list_get_lock);
2194 spin_lock(&phba->nvme_buf_list_put_lock);
2195 list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
2196 list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
2197 phba->get_nvme_bufs = 0;
2198 phba->put_nvme_bufs = 0;
2199 spin_unlock(&phba->nvme_buf_list_put_lock);
2200 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2202 /* post the list of nvme buffer sgls to port if available */
2203 if (!list_empty(&post_nblist)) {
2204 num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
2205 phba->sli4_hba.nvme_xri_cnt);
2206 /* failed to post any nvme buffer, return error */
2207 if (num_posted == 0)
2208 rc = -EIO;
2209 }
2210 return rc;
2211 }
2213 /**
2214 * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
2215 * @vport: The virtual port for which this call is being executed.
2216 * @num_to_alloc: The requested number of buffers to allocate.
2218 * This routine allocates nvme buffers for a device with the SLI-4 interface
2219 * spec; the nvme buffer contains all the information needed to initiate
2220 * an NVME I/O. After allocating up to @num_to_alloc NVME buffers and putting
2221 * them on a list, it posts them to the port using SGL block post.
2223 * Return codes:
2224 * int - number of nvme buffers that were allocated and posted.
2225 * 0 = failure, less than num_to_alloc is a partial failure.
2226 */
2227 static int
2228 lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
2229 {
2230 struct lpfc_hba *phba = vport->phba;
2231 struct lpfc_nvme_buf *lpfc_ncmd;
2232 struct lpfc_iocbq *pwqeq;
2233 union lpfc_wqe128 *wqe;
2234 struct sli4_sge *sgl;
2235 dma_addr_t pdma_phys_sgl;
2236 uint16_t iotag, lxri = 0;
2237 int bcnt, num_posted;
2238 LIST_HEAD(prep_nblist);
2239 LIST_HEAD(post_nblist);
2240 LIST_HEAD(nvme_nblist);
2242 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
2243 lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
2244 if (!lpfc_ncmd)
2245 break;
2246 /*
2247 * Get memory from the pci pool to map the virt space to
2248 * pci bus space for an I/O. The DMA buffer includes the
2249 * number of SGE's necessary to support the sg_tablesize.
2250 */
2251 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
2252 GFP_KERNEL,
2253 &lpfc_ncmd->dma_handle);
2254 if (!lpfc_ncmd->data) {
2255 kfree(lpfc_ncmd);
2256 break;
2257 }
2259 lxri = lpfc_sli4_next_xritag(phba);
2260 if (lxri == NO_XRI) {
2261 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
2262 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
2263 kfree(lpfc_ncmd);
2264 break;
2265 }
2266 pwqeq = &(lpfc_ncmd->cur_iocbq);
2267 wqe = &pwqeq->wqe;
2269 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
2270 iotag = lpfc_sli_next_iotag(phba, pwqeq);
2271 if (iotag == 0) {
2272 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
2273 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
2274 kfree(lpfc_ncmd);
2275 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2276 "6121 Failed to allocated IOTAG for"
2277 " XRI:0x%x\n", lxri);
2278 lpfc_sli4_free_xri(phba, lxri);
2279 break;
2280 }
2281 pwqeq->sli4_lxritag = lxri;
2282 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2283 pwqeq->iocb_flag |= LPFC_IO_NVME;
2284 pwqeq->context1 = lpfc_ncmd;
2285 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2287 /* Initialize local short-hand pointers. */
2288 lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
2289 sgl = lpfc_ncmd->nvme_sgl;
2290 pdma_phys_sgl = lpfc_ncmd->dma_handle;
2291 lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
2293 /* Rsp SGE will be filled in when we rcv an IO
2294 * from the NVME Layer to be sent.
2295 * The cmd is going to be embedded so we need a SKIP SGE.
2296 */
2297 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2298 bf_set(lpfc_sli4_sge_last, sgl, 0);
2299 sgl->word2 = cpu_to_le32(sgl->word2);
2300 /* Fill in word 3 / sgl_len during cmd submission */
2302 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
2304 /* Initialize WQE */
2305 memset(wqe, 0, sizeof(union lpfc_wqe));
2307 /* add the nvme buffer to a post list */
2308 list_add_tail(&lpfc_ncmd->list, &post_nblist);
2309 spin_lock_irq(&phba->nvme_buf_list_get_lock);
2310 phba->sli4_hba.nvme_xri_cnt++;
2311 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2312 }
2313 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
2314 "6114 Allocate %d out of %d requested new NVME "
2315 "buffers\n", bcnt, num_to_alloc);
2317 /* post the list of nvme buffer sgls to port if available */
2318 if (!list_empty(&post_nblist))
2319 num_posted = lpfc_post_nvme_sgl_list(phba,
2320 &post_nblist, bcnt);
2321 else
2322 num_posted = 0;
2324 return num_posted;
2325 }
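/*
 * Note on the allocation loop above: each failure leg unwinds exactly
 * what has been acquired so far, in reverse order - DMA buffer, then the
 * containing structure, and (once an XRI has been reserved) the XRI
 * itself via lpfc_sli4_free_xri() - so a partial allocation never leaks.
 * The function deliberately returns the number posted rather than an
 * errno, so callers treat a short count as a partial success.
 */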
2327 static inline struct lpfc_nvme_buf *
2328 lpfc_nvme_buf(struct lpfc_hba *phba)
2329 {
2330 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2332 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2333 &phba->lpfc_nvme_buf_list_get, list) {
2334 list_del_init(&lpfc_ncmd->list);
2335 phba->get_nvme_bufs--;
2336 return lpfc_ncmd;
2337 }
2338 return NULL;
2339 }
2341 /**
2342 * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
2343 * @phba: The HBA for which this call is being executed.
2345 * This routine removes an nvme buffer from the head of the @phba
2346 * lpfc_nvme_buf_list and returns it to the caller.
2348 * Return codes:
2349 * NULL - Error
2350 * Pointer to lpfc_nvme_buf - Success
2351 */
2352 static struct lpfc_nvme_buf *
2353 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
2354 int expedite)
2355 {
2356 struct lpfc_nvme_buf *lpfc_ncmd = NULL;
2357 unsigned long iflag = 0;
2359 spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
2360 if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
2361 lpfc_ncmd = lpfc_nvme_buf(phba);
2362 if (!lpfc_ncmd) {
2363 spin_lock(&phba->nvme_buf_list_put_lock);
2364 list_splice(&phba->lpfc_nvme_buf_list_put,
2365 &phba->lpfc_nvme_buf_list_get);
2366 phba->get_nvme_bufs += phba->put_nvme_bufs;
2367 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
2368 phba->put_nvme_bufs = 0;
2369 spin_unlock(&phba->nvme_buf_list_put_lock);
2370 if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
2371 lpfc_ncmd = lpfc_nvme_buf(phba);
2372 }
2373 spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
2375 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
2376 atomic_inc(&ndlp->cmd_pending);
2377 lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
2378 }
2379 return lpfc_ncmd;
2380 }
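/*
 * Illustrative sketch of the two-list pool discipline used above
 * (hypothetical names, not compiled): allocation takes from a "get"
 * list and frees land on a "put" list under a different lock; the two
 * are spliced only when the get side runs dry, so allocators and
 * releasers rarely contend on the same lock.
 */
#if 0	/* illustrative only, not compiled */
	spin_lock_irqsave(&get_lock, flags);
	buf = pool_take_head(&get_list);	/* hypothetical helper */
	if (!buf) {
		spin_lock(&put_lock);
		list_splice_init(&put_list, &get_list);
		spin_unlock(&put_lock);
		buf = pool_take_head(&get_list);
	}
	spin_unlock_irqrestore(&get_lock, flags);
#endif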
2382 /**
2383 * lpfc_release_nvme_buf: Return an nvme buffer back to the hba nvme buf list.
2384 * @phba: The Hba for which this call is being executed.
2385 * @lpfc_ncmd: The nvme buffer which is being released.
2387 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
2388 * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer
2389 * and cannot be reused for at least RA_TOV amount of time if it was
2390 * aborted.
2391 */
2392 static void
2393 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2394 {
2395 unsigned long iflag = 0;
2397 if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2398 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2400 lpfc_ncmd->nonsg_phys = 0;
2401 lpfc_ncmd->ndlp = NULL;
2402 lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
2404 if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2405 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2406 "6310 XB release deferred for "
2407 "ox_id x%x on reqtag x%x\n",
2408 lpfc_ncmd->cur_iocbq.sli4_xritag,
2409 lpfc_ncmd->cur_iocbq.iotag);
2411 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2412 iflag);
2413 list_add_tail(&lpfc_ncmd->list,
2414 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
2415 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
2416 iflag);
2417 } else {
2418 lpfc_ncmd->nvmeCmd = NULL;
2419 lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
2420 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
2421 list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
2422 phba->put_nvme_bufs++;
2423 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
2424 }
2425 }
2427 /**
2428 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2429 * @vport: the lpfc_vport instance requesting a localport.
2431 * This routine is invoked to create an nvme localport instance to bind
2432 * to the nvme_fc_transport. It is called once during driver load
2433 * like lpfc_create_shost after all other services are initialized.
2434 * It requires a vport, vpi, and wwns at call time. Other localport
2435 * parameters are modified as the driver's FCID and the Fabric WWN
2436 * are established.
2438 * Return codes
2439 * 0 - successful
2440 * -ENOMEM - no heap memory available
2441 * other values - from nvme registration upcall
2442 */
2443 int
2444 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2445 {
2446 int ret = 0;
2447 struct lpfc_hba *phba = vport->phba;
2448 struct nvme_fc_port_info nfcp_info;
2449 struct nvme_fc_local_port *localport;
2450 struct lpfc_nvme_lport *lport;
2451 struct lpfc_nvme_ctrl_stat *cstat;
2452 int len, i;
2454 /* Initialize this localport instance. The vport wwn usage ensures
2455 * that NPIV is accounted for.
2456 */
2457 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2458 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2459 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2460 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2462 /* We need to tell the transport layer + 1 because it takes page
2463 * alignment into account. When space for the SGL is allocated we
2464 * allocate + 3, one for cmd, one for rsp and one for this alignment.
2465 */
2466 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2467 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2469 cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
2470 phba->cfg_nvme_io_channel), GFP_KERNEL);
2471 if (!cstat)
2472 return -ENOMEM;
2474 /* localport is allocated from the stack, but the registration
2475 * call allocates heap memory as well as the private area.
2476 */
2477 #if (IS_ENABLED(CONFIG_NVME_FC))
2478 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2479 &vport->phba->pcidev->dev, &localport);
2480 #else
2481 ret = -ENOMEM;
2482 #endif
2483 if (!ret) {
2484 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2485 "6005 Successfully registered local "
2486 "NVME port num %d, localP %p, private %p, "
2487 "sg_seg %d\n",
2488 localport->port_num, localport,
2489 localport->private,
2490 lpfc_nvme_template.max_sgl_segments);
2492 /* Private is our lport size declared in the template. */
2493 lport = (struct lpfc_nvme_lport *)localport->private;
2494 vport->localport = localport;
2495 lport->vport = vport;
2496 lport->cstat = cstat;
2497 vport->nvmei_support = 1;
2499 atomic_set(&lport->xmt_fcp_noxri, 0);
2500 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2501 atomic_set(&lport->xmt_fcp_qdepth, 0);
2502 atomic_set(&lport->xmt_fcp_err, 0);
2503 atomic_set(&lport->xmt_fcp_wqerr, 0);
2504 atomic_set(&lport->xmt_fcp_abort, 0);
2505 atomic_set(&lport->xmt_ls_abort, 0);
2506 atomic_set(&lport->xmt_ls_err, 0);
2507 atomic_set(&lport->cmpl_fcp_xb, 0);
2508 atomic_set(&lport->cmpl_fcp_err, 0);
2509 atomic_set(&lport->cmpl_ls_xb, 0);
2510 atomic_set(&lport->cmpl_ls_err, 0);
2511 atomic_set(&lport->fc4NvmeLsRequests, 0);
2512 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2514 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
2515 cstat = &lport->cstat[i];
2516 atomic_set(&cstat->fc4NvmeInputRequests, 0);
2517 atomic_set(&cstat->fc4NvmeOutputRequests, 0);
2518 atomic_set(&cstat->fc4NvmeControlRequests, 0);
2519 atomic_set(&cstat->fc4NvmeIoCmpls, 0);
2520 }
2522 /* Don't post more new bufs if repost already recovered
2523 * the nvme sgls.
2524 */
2525 if (phba->sli4_hba.nvme_xri_cnt == 0) {
2526 len = lpfc_new_nvme_buf(vport,
2527 phba->sli4_hba.nvme_xri_max);
2528 vport->phba->total_nvme_bufs += len;
2529 }
2530 } else {
2531 kfree(cstat);
2532 }
2534 return ret;
2535 }
2537 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2539 * The driver has to wait for the host nvme transport to callback
2540 * indicating the localport has successfully unregistered all
2541 * resources. Since this is an uninterruptible wait, loop every ten
2542 * seconds and print a message indicating no progress.
2544 * An uninterruptible wait is used because of the risk of transport-to-
2545 * driver state mismatch.
2546 */
2547 void
2548 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2549 struct lpfc_nvme_lport *lport,
2550 struct completion *lport_unreg_cmp)
2551 {
2552 #if (IS_ENABLED(CONFIG_NVME_FC))
2553 u32 wait_tmo;
2554 int ret;
2556 /* Host transport has to clean up and confirm requiring an indefinite
2557 * wait. Print a message if a 10 second wait expires and renew the
2558 * wait. This is unexpected.
2559 */
2560 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2561 while (true) {
2562 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2563 if (unlikely(!ret)) {
2564 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2565 "6176 Lport %p Localport %p wait "
2566 "timed out. Renewing.\n",
2567 lport, vport->localport);
2568 continue;
2569 }
2570 break;
2571 }
2572 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2573 "6177 Lport %p Localport %p Complete Success\n",
2574 lport, vport->localport);
2575 #endif
2576 }
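/*
 * Note on the wait loop above: wait_for_completion_timeout() returns 0
 * on timeout and the remaining jiffies (> 0) once the completion fires,
 * so a zero return is treated as "log and keep waiting" rather than as
 * a hard error - the wait is effectively uninterruptible and unbounded.
 */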
2578 /**
2579 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2580 * @vport: pointer to the lpfc vport data structure.
2582 * This routine is invoked to destroy all lports bound to the phba.
2583 * The lport memory was allocated by the nvme fc transport and is
2584 * released there. This routine ensures all rports bound to the
2585 * lport have been disconnected.
2587 */
2588 void
2589 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2590 {
2591 #if (IS_ENABLED(CONFIG_NVME_FC))
2592 struct nvme_fc_local_port *localport;
2593 struct lpfc_nvme_lport *lport;
2594 struct lpfc_nvme_ctrl_stat *cstat;
2595 int ret;
2596 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2598 if (vport->nvmei_support == 0)
2599 return;
2601 localport = vport->localport;
2602 lport = (struct lpfc_nvme_lport *)localport->private;
2603 cstat = lport->cstat;
2605 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2606 "6011 Destroying NVME localport %p\n",
2607 localport);
2609 /* lport's rport list is clear. Unregister
2610 * lport and release resources.
2611 */
2612 lport->lport_unreg_cmp = &lport_unreg_cmp;
2613 ret = nvme_fc_unregister_localport(localport);
2615 /* Wait for completion. This either blocks
2616 * indefinitely or succeeds.
2617 */
2618 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2619 vport->localport = NULL;
2620 kfree(cstat);
2622 /* Regardless of the unregister upcall response, clear
2623 * nvmei_support. All rports are unregistered and the
2624 * driver will clean up.
2625 */
2626 vport->nvmei_support = 0;
2627 if (ret == 0) {
2628 lpfc_printf_vlog(vport,
2629 KERN_INFO, LOG_NVME_DISC,
2630 "6009 Unregistered lport Success\n");
2631 } else {
2632 lpfc_printf_vlog(vport,
2633 KERN_INFO, LOG_NVME_DISC,
2634 "6010 Unregistered lport "
2635 "Failed, status x%x\n",
2636 ret);
2637 }
2638 #endif
2639 }
2641 void
2642 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2643 {
2644 #if (IS_ENABLED(CONFIG_NVME_FC))
2645 struct nvme_fc_local_port *localport;
2646 struct lpfc_nvme_lport *lport;
2648 localport = vport->localport;
2649 if (!localport) {
2650 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2651 "6710 Update NVME fail. No localport\n");
2652 return;
2653 }
2654 lport = (struct lpfc_nvme_lport *)localport->private;
2655 if (!lport) {
2656 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2657 "6171 Update NVME fail. localP %p, No lport\n",
2658 localport);
2659 return;
2660 }
2661 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2662 "6012 Update NVME lport %p did x%x\n",
2663 localport, vport->fc_myDID);
2665 localport->port_id = vport->fc_myDID;
2666 if (localport->port_id == 0)
2667 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2668 else
2669 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2671 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2672 "6030 bound lport %p to DID x%06x\n",
2673 lport, localport->port_id);
2674 #endif
2675 }
2677 int
2678 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2679 {
2680 #if (IS_ENABLED(CONFIG_NVME_FC))
2681 int ret = 0;
2682 struct nvme_fc_local_port *localport;
2683 struct lpfc_nvme_lport *lport;
2684 struct lpfc_nvme_rport *rport;
2685 struct lpfc_nvme_rport *oldrport;
2686 struct nvme_fc_remote_port *remote_port;
2687 struct nvme_fc_port_info rpinfo;
2688 struct lpfc_nodelist *prev_ndlp = NULL;
2690 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2691 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2692 ndlp->nlp_DID, ndlp->nlp_type);
2694 localport = vport->localport;
2695 if (!localport)
2696 return 0;
2698 lport = (struct lpfc_nvme_lport *)localport->private;
2700 /* NVME rports are not preserved across devloss.
2701 * Just register this instance. Note, rpinfo->dev_loss_tmo
2702 * is left 0 to indicate acceptance of the transport defaults. The
2703 * driver communicates port role capabilities consistent
2704 * with the PRLI response data.
2705 */
2706 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2707 rpinfo.port_id = ndlp->nlp_DID;
2708 if (ndlp->nlp_type & NLP_NVME_TARGET)
2709 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2710 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2711 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2713 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2714 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2716 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2717 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2719 spin_lock_irq(&vport->phba->hbalock);
2720 oldrport = lpfc_ndlp_get_nrport(ndlp);
2721 spin_unlock_irq(&vport->phba->hbalock);
2722 if (!oldrport)
2723 lpfc_nlp_get(ndlp);
2725 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2726 if (!ret) {
2727 /* If the ndlp already has an nrport, this is just
2728 * a resume of the existing rport. Else this is a
2729 * new rport.
2730 */
2731 /* Guard against an unregister/reregister
2732 * race that leaves the WAIT flag set.
2733 */
2734 spin_lock_irq(&vport->phba->hbalock);
2735 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2736 spin_unlock_irq(&vport->phba->hbalock);
2737 rport = remote_port->private;
2738 if (oldrport) {
2739 /* New remoteport record does not guarantee valid
2740 * host private memory area.
2741 */
2742 prev_ndlp = oldrport->ndlp;
2743 if (oldrport == remote_port->private) {
2744 /* Same remoteport - ndlp should match.
2745 * Just reuse.
2746 */
2747 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2748 LOG_NVME_DISC,
2749 "6014 Rebinding lport to "
2750 "remoteport %p wwpn 0x%llx, "
2751 "Data: x%x x%x %p %p x%x x%06x\n",
2752 remote_port,
2753 remote_port->port_name,
2754 remote_port->port_id,
2755 remote_port->port_role,
2756 prev_ndlp,
2757 ndlp,
2758 ndlp->nlp_type,
2759 ndlp->nlp_DID);
2760 return 0;
2761 }
2763 /* Sever the ndlp<->rport association
2764 * before dropping the ndlp ref from
2765 * register.
2766 */
2767 spin_lock_irq(&vport->phba->hbalock);
2768 ndlp->nrport = NULL;
2769 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2770 spin_unlock_irq(&vport->phba->hbalock);
2771 rport->ndlp = NULL;
2772 rport->remoteport = NULL;
2774 /* Reference only removed if previous NDLP is no longer
2775 * active. It might be just a swap and removing the
2776 * reference would cause a premature cleanup.
2777 */
2778 if (prev_ndlp && prev_ndlp != ndlp) {
2779 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2780 (!prev_ndlp->nrport))
2781 lpfc_nlp_put(prev_ndlp);
2782 }
2783 }
2785 /* Clean bind the rport to the ndlp. */
2786 rport->remoteport = remote_port;
2787 rport->lport = lport;
2788 rport->ndlp = ndlp;
2789 spin_lock_irq(&vport->phba->hbalock);
2790 ndlp->nrport = rport;
2791 spin_unlock_irq(&vport->phba->hbalock);
2792 lpfc_printf_vlog(vport, KERN_INFO,
2793 LOG_NVME_DISC | LOG_NODE,
2794 "6022 Binding new rport to "
2795 "lport %p Remoteport %p rport %p WWNN 0x%llx, "
2796 "Rport WWPN 0x%llx DID "
2797 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
2798 lport, remote_port, rport,
2799 rpinfo.node_name, rpinfo.port_name,
2800 rpinfo.port_id, rpinfo.port_role,
2801 ndlp, prev_ndlp);
2802 } else {
2803 lpfc_printf_vlog(vport, KERN_ERR,
2804 LOG_NVME_DISC | LOG_NODE,
2805 "6031 RemotePort Registration failed "
2806 "err: %d, DID x%06x\n",
2807 ret, ndlp->nlp_DID);
2808 }
2810 return ret;
2811 #else
2812 return 0;
2813 #endif
2814 }
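/*
 * Summary of the registration flow above (condensed, hypothetical
 * phrasing): if the transport hands back the same remoteport private
 * area, the call is a pure rebind and returns early; otherwise the old
 * ndlp<->rport association is severed under the hbalock before the new
 * binding is made, and the old node reference is dropped only when the
 * previous ndlp is no longer active or no longer owns an nrport.
 */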
2816 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2818 * There is no notion of Devloss or rport recovery from the current
2819 * nvme_transport perspective. Loss of an rport just means IO cannot
2820 * be sent and recovery is completely up to the initiator.
2821 * For now, the driver just unbinds the DID and port_role so that
2822 * no further IO can be issued. Changes are planned for later.
2824 * Notes - the ndlp reference count is not decremented here
2825 * since there is no nvme_transport api for devloss. Node ref count
2826 * is only adjusted in driver unload.
2827 */
2828 void
2829 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2830 {
2831 #if (IS_ENABLED(CONFIG_NVME_FC))
2832 int ret;
2833 struct nvme_fc_local_port *localport;
2834 struct lpfc_nvme_lport *lport;
2835 struct lpfc_nvme_rport *rport;
2836 struct nvme_fc_remote_port *remoteport = NULL;
2838 localport = vport->localport;
2840 /* This is a fundamental error. The localport is always
2841 * available until driver unload. Just exit.
2842 */
2843 if (!localport)
2844 return;
2846 lport = (struct lpfc_nvme_lport *)localport->private;
2847 if (!lport)
2848 goto input_err;
2850 spin_lock_irq(&vport->phba->hbalock);
2851 rport = lpfc_ndlp_get_nrport(ndlp);
2852 if (rport)
2853 remoteport = rport->remoteport;
2854 spin_unlock_irq(&vport->phba->hbalock);
2855 if (!remoteport)
2856 goto input_err;
2858 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2859 "6033 Unreg nvme remoteport %p, portname x%llx, "
2860 "port_id x%06x, portstate x%x port type x%x\n",
2861 remoteport, remoteport->port_name,
2862 remoteport->port_id, remoteport->port_state,
2863 ndlp->nlp_type);
2865 /* Sanity check ndlp type. Only call for NVME ports. Don't
2866 * clear any rport state until the transport calls back.
2867 */
2869 if (ndlp->nlp_type & NLP_NVME_TARGET) {
2870 /* No concern about the role change on the nvme remoteport.
2871 * The transport will update it.
2872 */
2873 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
2875 /* Don't let the host nvme transport keep sending keep-alives
2876 * on this remoteport. Vport is unloading, no recovery. The
2877 * return value is ignored. The upcall is a courtesy to the
2878 * transport.
2879 */
2880 if (vport->load_flag & FC_UNLOADING)
2881 (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2883 ret = nvme_fc_unregister_remoteport(remoteport);
2884 if (ret != 0) {
2885 lpfc_nlp_put(ndlp);
2886 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2887 "6167 NVME unregister failed %d "
2888 "port_state x%x\n",
2889 ret, remoteport->port_state);
2890 }
2891 }
2892 return;
2894 input_err:
2895 #endif
2896 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2897 "6168 State error: lport %p, rport%p FCID x%06x\n",
2898 vport->localport, ndlp->rport, ndlp->nlp_DID);
2899 }
2901 /**
2902 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2903 * @phba: pointer to lpfc hba data structure.
2904 * @axri: pointer to the fcp xri abort wcqe structure.
2906 * This routine is invoked by the worker thread to process a SLI4 fast-path
2907 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
2908 * here.
2909 */
2910 void
2911 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2912 struct sli4_wcqe_xri_aborted *axri)
2913 {
2914 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2915 struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
2916 struct nvmefc_fcp_req *nvme_cmd = NULL;
2917 struct lpfc_nodelist *ndlp;
2918 unsigned long iflag = 0;
2920 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2921 return;
2922 spin_lock_irqsave(&phba->hbalock, iflag);
2923 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2924 list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2925 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
2926 list) {
2927 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
2928 list_del_init(&lpfc_ncmd->list);
2929 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2930 lpfc_ncmd->status = IOSTAT_SUCCESS;
2931 spin_unlock(
2932 &phba->sli4_hba.abts_nvme_buf_list_lock);
2934 spin_unlock_irqrestore(&phba->hbalock, iflag);
2935 ndlp = lpfc_ncmd->ndlp;
2936 if (ndlp)
2937 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2939 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2940 "6311 nvme_cmd %p xri x%x tag x%x "
2941 "abort complete and xri released\n",
2942 lpfc_ncmd->nvmeCmd, xri,
2943 lpfc_ncmd->cur_iocbq.iotag);
2945 /* Aborted NVME commands are required to not complete
2946 * before the abort exchange command fully completes.
2947 * Once completed, it is available via the put list.
2948 */
2949 if (lpfc_ncmd->nvmeCmd) {
2950 nvme_cmd = lpfc_ncmd->nvmeCmd;
2951 nvme_cmd->done(nvme_cmd);
2952 lpfc_ncmd->nvmeCmd = NULL;
2953 }
2954 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2955 return;
2956 }
2957 }
2958 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2959 spin_unlock_irqrestore(&phba->hbalock, iflag);
2961 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2962 "6312 XRI Aborted xri x%x not found\n", xri);
2967 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2968 * @phba: Pointer to HBA context object.
2970 * This function flushes all wqes in the nvme rings and frees all resources
2971 * in the txcmplq. This function does not issue abort wqes for the IO
2972 * commands in txcmplq, they will just be returned with
2973 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2974 * slot has been permanently disabled.
2975 */
2976 void
2977 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2978 {
2979 struct lpfc_sli_ring *pring;
2980 u32 i, wait_cnt = 0;
2982 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
2983 return;
2985 /* Cycle through all NVME rings and make sure all outstanding
2986 * WQEs have been removed from the txcmplqs.
2987 */
2988 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
2989 pring = phba->sli4_hba.nvme_wq[i]->pring;
2991 if (!pring)
2992 continue;
2994 /* Retrieve everything on the txcmplq */
2995 while (!list_empty(&pring->txcmplq)) {
2996 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2997 wait_cnt++;
2999 /* The sleep is 10 ms. Every ten seconds,
3000 * dump a message. Something is wrong.
3001 */
3002 if ((wait_cnt % 1000) == 0) {
3003 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3004 "6178 NVME IO not empty, "
3005 "cnt %d\n", wait_cnt);