drivers/scsi/lpfc/lpfc_nvmet.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
43 #include "lpfc_version.h"
44 #include "lpfc_hw4.h"
45 #include "lpfc_hw.h"
46 #include "lpfc_sli.h"
47 #include "lpfc_sli4.h"
48 #include "lpfc_nl.h"
49 #include "lpfc_disc.h"
50 #include "lpfc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 struct lpfc_nvmet_rcv_ctx *,
61 dma_addr_t rspbuf,
62 uint16_t rspsize);
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 struct lpfc_nvmet_rcv_ctx *,
67 uint32_t, uint16_t);
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 struct lpfc_nvmet_rcv_ctx *,
70 uint32_t, uint16_t);
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 struct lpfc_nvmet_rcv_ctx *,
73 uint32_t, uint16_t);
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 struct lpfc_nvmet_rcv_ctx *);
77 static union lpfc_wqe128 lpfc_tsend_cmd_template;
78 static union lpfc_wqe128 lpfc_treceive_cmd_template;
79 static union lpfc_wqe128 lpfc_trsp_cmd_template;
81 /* Setup WQE templates for NVME IOs */
82 void
83 lpfc_nvmet_cmd_template(void)
85 union lpfc_wqe128 *wqe;
87 /* TSEND template */
88 wqe = &lpfc_tsend_cmd_template;
89 memset(wqe, 0, sizeof(union lpfc_wqe128));
91 /* Word 0, 1, 2 - BDE is variable */
93 /* Word 3 - payload_offset_len is zero */
95 /* Word 4 - relative_offset is variable */
97 /* Word 5 - is zero */
99 /* Word 6 - ctxt_tag, xri_tag is variable */
101 /* Word 7 - wqe_ar is variable */
102 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
103 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
104 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
105 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
106 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
108 /* Word 8 - abort_tag is variable */
110 /* Word 9 - reqtag, rcvoxid is variable */
112 /* Word 10 - wqes, xc is variable */
113 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
114 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
115 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
116 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
117 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
118 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
120 /* Word 11 - sup, irsp, irsplen is variable */
121 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
122 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
123 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
124 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
125 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
126 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
128 /* Word 12 - fcp_data_len is variable */
130 /* Word 13, 14, 15 - PBDE is zero */
132 /* TRECEIVE template */
133 wqe = &lpfc_treceive_cmd_template;
134 memset(wqe, 0, sizeof(union lpfc_wqe128));
136 /* Word 0, 1, 2 - BDE is variable */
138 /* Word 3 */
139 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
141 /* Word 4 - relative_offset is variable */
143 /* Word 5 - is zero */
145 /* Word 6 - ctxt_tag, xri_tag is variable */
147 /* Word 7 */
148 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
149 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
150 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
151 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
152 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
154 /* Word 8 - abort_tag is variable */
156 /* Word 9 - reqtag, rcvoxid is variable */
158 /* Word 10 - xc is variable */
159 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
160 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
161 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
162 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
163 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
164 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
166 /* Word 11 - pbde is variable */
167 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
168 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
169 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
170 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
171 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
172 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
174 /* Word 12 - fcp_data_len is variable */
176 /* Word 13, 14, 15 - PBDE is variable */
178 /* TRSP template */
179 wqe = &lpfc_trsp_cmd_template;
180 memset(wqe, 0, sizeof(union lpfc_wqe128));
182 /* Word 0, 1, 2 - BDE is variable */
184 /* Word 3 - response_len is variable */
186 /* Word 4, 5 - is zero */
188 /* Word 6 - ctxt_tag, xri_tag is variable */
190 /* Word 7 */
191 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
192 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
193 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
194 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
195 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
197 /* Word 8 - abort_tag is variable */
199 /* Word 9 - reqtag is variable */
201 /* Word 10 wqes, xc is variable */
202 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
203 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
204 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
205 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
206 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
207 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
209 /* Word 11 irsp, irsplen is variable */
210 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
211 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
212 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
213 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
214 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
215 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
217 /* Word 12, 13, 14, 15 - is zero */
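/**
 * lpfc_nvmet_defer_release - Defer releasing a NVMET receive context
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context whose release is being deferred.
 *
 * Marks the context with LPFC_NVMET_CTX_RLS and queues it on the
 * lpfc_abts_nvmet_ctx_list so it is released once the aborted exchange
 * completes. If LPFC_NVMET_CTX_RLS is already set, the context is already
 * queued and the call is a no-op.
 */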
220 void
221 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
223 unsigned long iflag;
225 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
226 "6313 NVMET Defer ctx release xri x%x flg x%x\n",
227 ctxp->oxid, ctxp->flag);
229 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
230 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
231 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
232 iflag);
233 return;
235 ctxp->flag |= LPFC_NVMET_CTX_RLS;
236 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
237 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
241 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
242 * @phba: Pointer to HBA context object.
243 * @cmdwqe: Pointer to driver command WQE object.
244 * @wcqe: Pointer to driver response CQE object.
246 * The function is called from the SLI ring event handler with no
247 * lock held. This function is the completion handler for NVME LS commands.
248 * The function frees memory resources used for the NVME commands.
250 static void
251 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
252 struct lpfc_wcqe_complete *wcqe)
254 struct lpfc_nvmet_tgtport *tgtp;
255 struct nvmefc_tgt_ls_req *rsp;
256 struct lpfc_nvmet_rcv_ctx *ctxp;
257 uint32_t status, result;
259 status = bf_get(lpfc_wcqe_c_status, wcqe);
260 result = wcqe->parameter;
261 ctxp = cmdwqe->context2;
263 if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
264 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
265 "6410 NVMET LS cmpl state mismatch IO x%x: "
266 "%d %d\n",
267 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
270 if (!phba->targetport)
271 goto out;
273 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
275 if (tgtp) {
276 if (status) {
277 atomic_inc(&tgtp->xmt_ls_rsp_error);
278 if (result == IOERR_ABORT_REQUESTED)
279 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
280 if (bf_get(lpfc_wcqe_c_xb, wcqe))
281 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
282 } else {
283 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
287 out:
288 rsp = &ctxp->ctx.ls_req;
290 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
291 ctxp->oxid, status, result);
293 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
294 "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
295 status, result, ctxp->oxid);
297 lpfc_nlp_put(cmdwqe->context1);
298 cmdwqe->context2 = NULL;
299 cmdwqe->context3 = NULL;
300 lpfc_sli_release_iocbq(phba, cmdwqe);
301 rsp->done(rsp);
302 kfree(ctxp);
306 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
307 * @phba: HBA the buffer is associated with
308 * @ctx_buf: context buffer to clean up and repost
311 * Description: Reposts the DMA buffer to its associated RQ so it can be
312 * reused, then returns the receive context to the free list or uses it to service a waiting IO.
314 * Notes: Takes phba->sli4_hba.nvmet_io_wait_lock. Can be called with or without other locks held.
316 * Returns: None
318 void
319 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
321 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
322 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
323 struct lpfc_nvmet_tgtport *tgtp;
324 struct fc_frame_header *fc_hdr;
325 struct rqb_dmabuf *nvmebuf;
326 struct lpfc_nvmet_ctx_info *infop;
327 uint32_t *payload;
328 uint32_t size, oxid, sid, rc;
329 int cpu;
330 unsigned long iflag;
332 if (ctxp->txrdy) {
333 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
334 ctxp->txrdy_phys);
335 ctxp->txrdy = NULL;
336 ctxp->txrdy_phys = 0;
339 if (ctxp->state == LPFC_NVMET_STE_FREE) {
340 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
341 "6411 NVMET free, already free IO x%x: %d %d\n",
342 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
344 ctxp->state = LPFC_NVMET_STE_FREE;
346 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
347 if (phba->sli4_hba.nvmet_io_wait_cnt) {
348 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
349 nvmebuf, struct rqb_dmabuf,
350 hbuf.list);
351 phba->sli4_hba.nvmet_io_wait_cnt--;
352 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
353 iflag);
355 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
356 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
357 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
358 payload = (uint32_t *)(nvmebuf->dbuf.virt);
359 size = nvmebuf->bytes_recv;
360 sid = sli4_sid_from_fc_hdr(fc_hdr);
362 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
363 ctxp->wqeq = NULL;
364 ctxp->txrdy = NULL;
365 ctxp->offset = 0;
366 ctxp->phba = phba;
367 ctxp->size = size;
368 ctxp->oxid = oxid;
369 ctxp->sid = sid;
370 ctxp->state = LPFC_NVMET_STE_RCV;
371 ctxp->entry_cnt = 1;
372 ctxp->flag = 0;
373 ctxp->ctxbuf = ctx_buf;
374 ctxp->rqb_buffer = (void *)nvmebuf;
375 spin_lock_init(&ctxp->ctxlock);
377 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
378 if (ctxp->ts_cmd_nvme) {
379 ctxp->ts_cmd_nvme = ktime_get_ns();
380 ctxp->ts_nvme_data = 0;
381 ctxp->ts_data_wqput = 0;
382 ctxp->ts_isr_data = 0;
383 ctxp->ts_data_nvme = 0;
384 ctxp->ts_nvme_status = 0;
385 ctxp->ts_status_wqput = 0;
386 ctxp->ts_isr_status = 0;
387 ctxp->ts_status_nvme = 0;
389 #endif
390 atomic_inc(&tgtp->rcv_fcp_cmd_in);
392 * The calling sequence should be:
393 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
394 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
395 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
396 * the NVME command / FC header is stored.
397 * A buffer has already been reposted for this IO, so just free
398 * the nvmebuf.
400 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
401 payload, size);
403 /* Process FCP command */
404 if (rc == 0) {
405 ctxp->rqb_buffer = NULL;
406 atomic_inc(&tgtp->rcv_fcp_cmd_out);
407 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
408 return;
411 /* Processing of FCP command is deferred */
412 if (rc == -EOVERFLOW) {
413 lpfc_nvmeio_data(phba,
414 "NVMET RCV BUSY: xri x%x sz %d "
415 "from %06x\n",
416 oxid, size, sid);
417 atomic_inc(&tgtp->rcv_fcp_cmd_out);
418 return;
420 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
421 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
422 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
423 ctxp->oxid, rc,
424 atomic_read(&tgtp->rcv_fcp_cmd_in),
425 atomic_read(&tgtp->rcv_fcp_cmd_out),
426 atomic_read(&tgtp->xmt_fcp_release));
428 lpfc_nvmet_defer_release(phba, ctxp);
429 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
430 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
431 return;
433 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
436 * Use the CPU context list, from the MRQ the IO was received on
437 * (ctxp->idx), to save context structure.
439 cpu = smp_processor_id();
440 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
441 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
442 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
443 infop->nvmet_ctx_list_cnt++;
444 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
445 #endif
448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
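/**
 * lpfc_nvmet_ktime - Accumulate per-IO latency statistics for a completed IO
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context carrying the ktime timestamps.
 *
 * Validates that the ten recorded timestamps are monotonic, derives the
 * segment durations described in the comment below, and folds each into
 * the phba->ktime_seg* total/min/max counters.
 */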
449 static void
450 lpfc_nvmet_ktime(struct lpfc_hba *phba,
451 struct lpfc_nvmet_rcv_ctx *ctxp)
453 uint64_t seg1, seg2, seg3, seg4, seg5;
454 uint64_t seg6, seg7, seg8, seg9, seg10;
455 uint64_t segsum;
457 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
458 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
459 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
460 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
461 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
462 return;
464 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
465 return;
466 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
467 return;
468 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
469 return;
470 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
471 return;
472 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
473 return;
474 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
475 return;
476 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
477 return;
478 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
479 return;
480 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
481 return;
482 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
483 return;
485 * Segment 1 - Time from FCP command received by MSI-X ISR
486 * to FCP command is passed to NVME Layer.
487 * Segment 2 - Time from FCP command payload handed
488 * off to NVME Layer to Driver receives a Command op
489 * from NVME Layer.
490 * Segment 3 - Time from Driver receives a Command op
491 * from NVME Layer to Command is put on WQ.
492 * Segment 4 - Time from Driver WQ put is done
493 * to MSI-X ISR for Command cmpl.
494 * Segment 5 - Time from MSI-X ISR for Command cmpl to
495 * Command cmpl is passed to NVME Layer.
496 * Segment 6 - Time from Command cmpl is passed to NVME
497 * Layer to Driver receives a RSP op from NVME Layer.
498 * Segment 7 - Time from Driver receives a RSP op from
499 * NVME Layer to WQ put is done on TRSP FCP Status.
500 * Segment 8 - Time from Driver WQ put is done on TRSP
501 * FCP Status to MSI-X ISR for TRSP cmpl.
502 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
503 * TRSP cmpl is passed to NVME Layer.
504 * Segment 10 - Time from FCP command received by
505 * MSI-X ISR to command is completed on wire.
506 * (Segments 1 thru 8) for READDATA / WRITEDATA
507 * (Segments 1 thru 4) for READDATA_RSP
509 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
510 segsum = seg1;
512 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
513 if (segsum > seg2)
514 return;
515 seg2 -= segsum;
516 segsum += seg2;
518 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
519 if (segsum > seg3)
520 return;
521 seg3 -= segsum;
522 segsum += seg3;
524 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
525 if (segsum > seg4)
526 return;
527 seg4 -= segsum;
528 segsum += seg4;
530 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
531 if (segsum > seg5)
532 return;
533 seg5 -= segsum;
534 segsum += seg5;
537 /* For auto rsp commands seg6 thru seg10 will be 0 */
538 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
539 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
540 if (segsum > seg6)
541 return;
542 seg6 -= segsum;
543 segsum += seg6;
545 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
546 if (segsum > seg7)
547 return;
548 seg7 -= segsum;
549 segsum += seg7;
551 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
552 if (segsum > seg8)
553 return;
554 seg8 -= segsum;
555 segsum += seg8;
557 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
558 if (segsum > seg9)
559 return;
560 seg9 -= segsum;
561 segsum += seg9;
563 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
564 return;
565 seg10 = (ctxp->ts_isr_status -
566 ctxp->ts_isr_cmd);
567 } else {
568 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
569 return;
570 seg6 = 0;
571 seg7 = 0;
572 seg8 = 0;
573 seg9 = 0;
574 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
577 phba->ktime_seg1_total += seg1;
578 if (seg1 < phba->ktime_seg1_min)
579 phba->ktime_seg1_min = seg1;
580 else if (seg1 > phba->ktime_seg1_max)
581 phba->ktime_seg1_max = seg1;
583 phba->ktime_seg2_total += seg2;
584 if (seg2 < phba->ktime_seg2_min)
585 phba->ktime_seg2_min = seg2;
586 else if (seg2 > phba->ktime_seg2_max)
587 phba->ktime_seg2_max = seg2;
589 phba->ktime_seg3_total += seg3;
590 if (seg3 < phba->ktime_seg3_min)
591 phba->ktime_seg3_min = seg3;
592 else if (seg3 > phba->ktime_seg3_max)
593 phba->ktime_seg3_max = seg3;
595 phba->ktime_seg4_total += seg4;
596 if (seg4 < phba->ktime_seg4_min)
597 phba->ktime_seg4_min = seg4;
598 else if (seg4 > phba->ktime_seg4_max)
599 phba->ktime_seg4_max = seg4;
601 phba->ktime_seg5_total += seg5;
602 if (seg5 < phba->ktime_seg5_min)
603 phba->ktime_seg5_min = seg5;
604 else if (seg5 > phba->ktime_seg5_max)
605 phba->ktime_seg5_max = seg5;
607 phba->ktime_data_samples++;
608 if (!seg6)
609 goto out;
611 phba->ktime_seg6_total += seg6;
612 if (seg6 < phba->ktime_seg6_min)
613 phba->ktime_seg6_min = seg6;
614 else if (seg6 > phba->ktime_seg6_max)
615 phba->ktime_seg6_max = seg6;
617 phba->ktime_seg7_total += seg7;
618 if (seg7 < phba->ktime_seg7_min)
619 phba->ktime_seg7_min = seg7;
620 else if (seg7 > phba->ktime_seg7_max)
621 phba->ktime_seg7_max = seg7;
623 phba->ktime_seg8_total += seg8;
624 if (seg8 < phba->ktime_seg8_min)
625 phba->ktime_seg8_min = seg8;
626 else if (seg8 > phba->ktime_seg8_max)
627 phba->ktime_seg8_max = seg8;
629 phba->ktime_seg9_total += seg9;
630 if (seg9 < phba->ktime_seg9_min)
631 phba->ktime_seg9_min = seg9;
632 else if (seg9 > phba->ktime_seg9_max)
633 phba->ktime_seg9_max = seg9;
634 out:
635 phba->ktime_seg10_total += seg10;
636 if (seg10 < phba->ktime_seg10_min)
637 phba->ktime_seg10_min = seg10;
638 else if (seg10 > phba->ktime_seg10_max)
639 phba->ktime_seg10_max = seg10;
640 phba->ktime_status_samples++;
642 #endif
645 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
646 * @phba: Pointer to HBA context object.
647 * @cmdwqe: Pointer to driver command WQE object.
648 * @wcqe: Pointer to driver response CQE object.
650 * The function is called from the SLI ring event handler with no
651 * lock held. This function is the completion handler for NVME FCP commands.
652 * The function frees memory resources used for the NVME commands.
654 static void
655 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
656 struct lpfc_wcqe_complete *wcqe)
658 struct lpfc_nvmet_tgtport *tgtp;
659 struct nvmefc_tgt_fcp_req *rsp;
660 struct lpfc_nvmet_rcv_ctx *ctxp;
661 uint32_t status, result, op, start_clean, logerr;
662 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
663 uint32_t id;
664 #endif
666 ctxp = cmdwqe->context2;
667 ctxp->flag &= ~LPFC_NVMET_IO_INP;
669 rsp = &ctxp->ctx.fcp_req;
670 op = rsp->op;
672 status = bf_get(lpfc_wcqe_c_status, wcqe);
673 result = wcqe->parameter;
675 if (phba->targetport)
676 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
677 else
678 tgtp = NULL;
680 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
681 ctxp->oxid, op, status);
683 if (status) {
684 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
685 rsp->transferred_length = 0;
686 if (tgtp) {
687 atomic_inc(&tgtp->xmt_fcp_rsp_error);
688 if (result == IOERR_ABORT_REQUESTED)
689 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
692 logerr = LOG_NVME_IOERR;
694 /* pick up SLI4 exchange busy condition */
695 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
696 ctxp->flag |= LPFC_NVMET_XBUSY;
697 logerr |= LOG_NVME_ABTS;
698 if (tgtp)
699 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
701 } else {
702 ctxp->flag &= ~LPFC_NVMET_XBUSY;
705 lpfc_printf_log(phba, KERN_INFO, logerr,
706 "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
707 ctxp->oxid, status, result, ctxp->flag);
709 } else {
710 rsp->fcp_error = NVME_SC_SUCCESS;
711 if (op == NVMET_FCOP_RSP)
712 rsp->transferred_length = rsp->rsplen;
713 else
714 rsp->transferred_length = rsp->transfer_length;
715 if (tgtp)
716 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
719 if ((op == NVMET_FCOP_READDATA_RSP) ||
720 (op == NVMET_FCOP_RSP)) {
721 /* Sanity check */
722 ctxp->state = LPFC_NVMET_STE_DONE;
723 ctxp->entry_cnt++;
725 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
726 if (ctxp->ts_cmd_nvme) {
727 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
728 ctxp->ts_isr_data =
729 cmdwqe->isr_timestamp;
730 ctxp->ts_data_nvme =
731 ktime_get_ns();
732 ctxp->ts_nvme_status =
733 ctxp->ts_data_nvme;
734 ctxp->ts_status_wqput =
735 ctxp->ts_data_nvme;
736 ctxp->ts_isr_status =
737 ctxp->ts_data_nvme;
738 ctxp->ts_status_nvme =
739 ctxp->ts_data_nvme;
740 } else {
741 ctxp->ts_isr_status =
742 cmdwqe->isr_timestamp;
743 ctxp->ts_status_nvme =
744 ktime_get_ns();
747 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
748 id = smp_processor_id();
749 if (ctxp->cpu != id)
750 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
751 "6703 CPU Check cmpl: "
752 "cpu %d expect %d\n",
753 id, ctxp->cpu);
754 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
755 phba->cpucheck_cmpl_io[id]++;
757 #endif
758 rsp->done(rsp);
759 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
760 if (ctxp->ts_cmd_nvme)
761 lpfc_nvmet_ktime(phba, ctxp);
762 #endif
763 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
764 } else {
765 ctxp->entry_cnt++;
766 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
767 memset(((char *)cmdwqe) + start_clean, 0,
768 (sizeof(struct lpfc_iocbq) - start_clean));
769 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
770 if (ctxp->ts_cmd_nvme) {
771 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
772 ctxp->ts_data_nvme = ktime_get_ns();
774 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
775 id = smp_processor_id();
776 if (ctxp->cpu != id)
777 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
778 "6704 CPU Check cmdcmpl: "
779 "cpu %d expect %d\n",
780 id, ctxp->cpu);
781 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
782 phba->cpucheck_ccmpl_io[id]++;
784 #endif
785 rsp->done(rsp);
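/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response
 * @tgtport: Pointer to the NVME transport targetport.
 * @rsp: LS response to transmit.
 *
 * Builds an XMIT_SEQUENCE WQE for the LS response payload and issues it
 * on the ELS ring. On success the receive buffer is freed here, while the
 * ctxp and iocbq are freed by lpfc_nvmet_xmt_ls_rsp_cmp(). On failure the
 * exchange is aborted and a negative errno is returned.
 */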
789 static int
790 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
791 struct nvmefc_tgt_ls_req *rsp)
793 struct lpfc_nvmet_rcv_ctx *ctxp =
794 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
795 struct lpfc_hba *phba = ctxp->phba;
796 struct hbq_dmabuf *nvmebuf =
797 (struct hbq_dmabuf *)ctxp->rqb_buffer;
798 struct lpfc_iocbq *nvmewqeq;
799 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
800 struct lpfc_dmabuf dmabuf;
801 struct ulp_bde64 bpl;
802 int rc;
804 if (phba->pport->load_flag & FC_UNLOADING)
805 return -ENODEV;
810 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
811 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
813 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
814 (ctxp->entry_cnt != 1)) {
815 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
816 "6412 NVMET LS rsp state mismatch "
817 "oxid x%x: %d %d\n",
818 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
820 ctxp->state = LPFC_NVMET_STE_LS_RSP;
821 ctxp->entry_cnt++;
823 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
824 rsp->rsplen);
825 if (nvmewqeq == NULL) {
826 atomic_inc(&nvmep->xmt_ls_drop);
827 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
828 "6150 LS Drop IO x%x: Prep\n",
829 ctxp->oxid);
830 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
831 atomic_inc(&nvmep->xmt_ls_abort);
832 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
833 ctxp->sid, ctxp->oxid);
834 return -ENOMEM;
837 /* Save numBdes for bpl2sgl */
838 nvmewqeq->rsvd2 = 1;
839 nvmewqeq->hba_wqidx = 0;
840 nvmewqeq->context3 = &dmabuf;
841 dmabuf.virt = &bpl;
842 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
843 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
844 bpl.tus.f.bdeSize = rsp->rsplen;
845 bpl.tus.f.bdeFlags = 0;
846 bpl.tus.w = le32_to_cpu(bpl.tus.w);
848 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
849 nvmewqeq->iocb_cmpl = NULL;
850 nvmewqeq->context2 = ctxp;
852 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
853 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
855 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
856 if (rc == WQE_SUCCESS) {
858 * Okay to repost buffer here, but wait till cmpl
859 * before freeing ctxp and iocbq.
861 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
862 ctxp->rqb_buffer = NULL;
863 atomic_inc(&nvmep->xmt_ls_rsp);
864 return 0;
866 /* Give back resources */
867 atomic_inc(&nvmep->xmt_ls_drop);
868 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
869 "6151 LS Drop IO x%x: Issue %d\n",
870 ctxp->oxid, rc);
872 lpfc_nlp_put(nvmewqeq->context1);
874 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
875 atomic_inc(&nvmep->xmt_ls_abort);
876 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
877 return -ENXIO;
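/**
 * lpfc_nvmet_xmt_fcp_op - Issue a TSEND/TRECEIVE/TRSP operation for an IO
 * @tgtport: Pointer to the NVME transport targetport.
 * @rsp: FCP request describing the operation to perform.
 *
 * Prepares the FCP WQE for the requested operation and issues it on the
 * hardware queue given by rsp->hwqid. If the WQ is full, the WQE is
 * queued on the wqfull_list to be re-issued when a WQE release CQE
 * indicates free slots.
 */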
880 static int
881 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
882 struct nvmefc_tgt_fcp_req *rsp)
884 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
885 struct lpfc_nvmet_rcv_ctx *ctxp =
886 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
887 struct lpfc_hba *phba = ctxp->phba;
888 struct lpfc_queue *wq;
889 struct lpfc_iocbq *nvmewqeq;
890 struct lpfc_sli_ring *pring;
891 unsigned long iflags;
892 int rc;
894 if (phba->pport->load_flag & FC_UNLOADING) {
895 rc = -ENODEV;
896 goto aerr;
897 }
904 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
905 if (ctxp->ts_cmd_nvme) {
906 if (rsp->op == NVMET_FCOP_RSP)
907 ctxp->ts_nvme_status = ktime_get_ns();
908 else
909 ctxp->ts_nvme_data = ktime_get_ns();
911 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
912 int id = smp_processor_id();
913 ctxp->cpu = id;
914 if (id < LPFC_CHECK_CPU_CNT)
915 phba->cpucheck_xmt_io[id]++;
916 if (rsp->hwqid != id) {
917 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
918 "6705 CPU Check OP: "
919 "cpu %d expect %d\n",
920 id, rsp->hwqid);
921 ctxp->cpu = rsp->hwqid;
924 #endif
926 /* Sanity check */
927 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
928 (ctxp->state == LPFC_NVMET_STE_ABORT)) {
929 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
930 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
931 "6102 IO xri x%x aborted\n",
932 ctxp->oxid);
933 rc = -ENXIO;
934 goto aerr;
937 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
938 if (nvmewqeq == NULL) {
939 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
940 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
941 "6152 FCP Drop IO x%x: Prep\n",
942 ctxp->oxid);
943 rc = -ENXIO;
944 goto aerr;
947 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
948 nvmewqeq->iocb_cmpl = NULL;
949 nvmewqeq->context2 = ctxp;
950 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
951 ctxp->wqeq->hba_wqidx = rsp->hwqid;
953 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
954 ctxp->oxid, rsp->op, rsp->rsplen);
956 ctxp->flag |= LPFC_NVMET_IO_INP;
957 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
958 if (rc == WQE_SUCCESS) {
959 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
960 if (!ctxp->ts_cmd_nvme)
961 return 0;
962 if (rsp->op == NVMET_FCOP_RSP)
963 ctxp->ts_status_wqput = ktime_get_ns();
964 else
965 ctxp->ts_data_wqput = ktime_get_ns();
966 #endif
967 return 0;
970 if (rc == -EBUSY) {
972 * WQ was full, so queue nvmewqeq to be sent after
973 * WQE release CQE
975 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
976 wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
977 pring = wq->pring;
978 spin_lock_irqsave(&pring->ring_lock, iflags);
979 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
980 wq->q_flag |= HBA_NVMET_WQFULL;
981 spin_unlock_irqrestore(&pring->ring_lock, iflags);
982 atomic_inc(&lpfc_nvmep->defer_wqfull);
983 return 0;
986 /* Give back resources */
987 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
988 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
989 "6153 FCP Drop IO x%x: Issue: %d\n",
990 ctxp->oxid, rc);
992 ctxp->wqeq->hba_wqidx = 0;
993 nvmewqeq->context2 = NULL;
994 nvmewqeq->context3 = NULL;
995 rc = -EBUSY;
996 aerr:
997 return rc;
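/**
 * lpfc_nvmet_targetport_delete - Targetport delete callback from the transport
 * @targetport: Targetport being deleted.
 *
 * Completes tport_unreg_cmp to release the thread waiting in
 * lpfc_nvmet_destroy_targetport() for the unregister to finish.
 */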
1000 static void
1001 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1003 struct lpfc_nvmet_tgtport *tport = targetport->private;
1005 /* release any threads waiting for the unreg to complete */
1006 if (tport->phba->targetport)
1007 complete(tport->tport_unreg_cmp);
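/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding FCP target operation
 * @tgtport: Pointer to the NVME transport targetport.
 * @req: FCP request to abort.
 *
 * Marks the context aborted, then issues an unsolicited abort if no IO
 * WQEs have been started on the exchange (or if the IO sits on the
 * wqfull_list), or a solicited abort otherwise. If the firmware is
 * already aborting the exchange (XBUSY), nothing more is done here.
 */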
1010 static void
1011 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1012 struct nvmefc_tgt_fcp_req *req)
1014 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1015 struct lpfc_nvmet_rcv_ctx *ctxp =
1016 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1017 struct lpfc_hba *phba = ctxp->phba;
1018 struct lpfc_queue *wq;
1019 unsigned long flags;
1021 if (phba->pport->load_flag & FC_UNLOADING)
1022 return;
1027 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1028 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
1029 ctxp->oxid, ctxp->flag, ctxp->state);
1031 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1032 ctxp->oxid, ctxp->flag, ctxp->state);
1034 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1036 spin_lock_irqsave(&ctxp->ctxlock, flags);
1037 ctxp->state = LPFC_NVMET_STE_ABORT;
1039 /* Since iaab/iaar are NOT set, we need to check
1040 * if the firmware is in the process of aborting the IO
1042 if (ctxp->flag & LPFC_NVMET_XBUSY) {
1043 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1044 return;
1046 ctxp->flag |= LPFC_NVMET_ABORT_OP;
1048 if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1049 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1050 ctxp->oxid);
1051 wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
1052 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1053 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1054 return;
1057 /* A state of LPFC_NVMET_STE_RCV means we have just received
1058 * the NVME command and have not started processing it.
1059 * (by issuing any IO WQEs on this exchange yet)
1061 if (ctxp->state == LPFC_NVMET_STE_RCV)
1062 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1063 ctxp->oxid);
1064 else
1065 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1066 ctxp->oxid);
1067 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
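/**
 * lpfc_nvmet_xmt_fcp_release - Transport is done with a receive context
 * @tgtport: Pointer to the NVME transport targetport.
 * @rsp: FCP request whose context is being released.
 *
 * If an abort is still pending on the exchange (ABORT_OP or XBUSY), the
 * release is deferred to the abort path; otherwise the context buffer is
 * reposted immediately.
 */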
1070 static void
1071 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1072 struct nvmefc_tgt_fcp_req *rsp)
1074 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1075 struct lpfc_nvmet_rcv_ctx *ctxp =
1076 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1077 struct lpfc_hba *phba = ctxp->phba;
1078 unsigned long flags;
1079 bool aborting = false;
1081 if (ctxp->state != LPFC_NVMET_STE_DONE &&
1082 ctxp->state != LPFC_NVMET_STE_ABORT) {
1083 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1084 "6413 NVMET release bad state %d %d oxid x%x\n",
1085 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1088 spin_lock_irqsave(&ctxp->ctxlock, flags);
1089 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1090 (ctxp->flag & LPFC_NVMET_XBUSY)) {
1091 aborting = true;
1092 /* let the abort path do the real release */
1093 lpfc_nvmet_defer_release(phba, ctxp);
1095 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1097 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1098 ctxp->state, aborting);
1100 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1102 if (aborting)
1103 return;
1105 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
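/**
 * lpfc_nvmet_defer_rcv - Transport has consumed a deferred command buffer
 * @tgtport: Pointer to the NVME transport targetport.
 * @rsp: FCP request whose deferred receive buffer can now be freed.
 *
 * Called after a command whose processing was deferred (-EOVERFLOW) has
 * been picked up by the transport. Frees the saved nvmebuf, since a new
 * buffer has already replaced it on the RQ.
 */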
1108 static void
1109 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1110 struct nvmefc_tgt_fcp_req *rsp)
1112 struct lpfc_nvmet_tgtport *tgtp;
1113 struct lpfc_nvmet_rcv_ctx *ctxp =
1114 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1115 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1116 struct lpfc_hba *phba = ctxp->phba;
1118 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1119 ctxp->oxid, ctxp->size, smp_processor_id());
1121 if (!nvmebuf) {
1122 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1123 "6425 Defer rcv: no buffer xri x%x: "
1124 "flg %x ste %x\n",
1125 ctxp->oxid, ctxp->flag, ctxp->state);
1126 return;
1129 tgtp = phba->targetport->private;
1130 if (tgtp)
1131 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1133 /* Free the nvmebuf since a new buffer already replaced it */
1134 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1137 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1138 .targetport_delete = lpfc_nvmet_targetport_delete,
1139 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1140 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1141 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1142 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1143 .defer_rcv = lpfc_nvmet_defer_rcv,
1145 .max_hw_queues = 1,
1146 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1147 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1148 .dma_boundary = 0xFFFFFFFF,
1150 /* optional features */
1151 .target_features = 0,
1152 /* sizes of additional private data for data structures */
1153 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
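/**
 * __lpfc_nvmet_clean_io_for_cpu - Free every receive context on one list
 * @phba: Pointer to HBA context object.
 * @infop: Per-CPU/MRQ context list to drain.
 *
 * For each context buffer: returns its sglq to lpfc_nvmet_sgl_list,
 * releases its iocbq, and frees the context memory.
 */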
1156 static void
1157 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1158 struct lpfc_nvmet_ctx_info *infop)
1160 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1161 unsigned long flags;
1163 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1164 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1165 &infop->nvmet_ctx_list, list) {
1166 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1167 list_del_init(&ctx_buf->list);
1168 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1170 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1171 ctx_buf->sglq->state = SGL_FREED;
1172 ctx_buf->sglq->ndlp = NULL;
1174 spin_lock(&phba->sli4_hba.sgl_list_lock);
1175 list_add_tail(&ctx_buf->sglq->list,
1176 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1177 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1179 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1180 kfree(ctx_buf->context);
1182 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
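/**
 * lpfc_nvmet_cleanup_io_context - Free all NVMET receive context resources
 * @phba: Pointer to HBA context object.
 *
 * Drains every per-CPU context list for every MRQ and frees the context
 * info array allocated by lpfc_nvmet_setup_io_context().
 */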
1185 static void
1186 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1188 struct lpfc_nvmet_ctx_info *infop;
1189 int i, j;
1191 /* The first context list, MRQ 0 CPU 0 */
1192 infop = phba->sli4_hba.nvmet_ctx_info;
1193 if (!infop)
1194 return;
1196 /* Cycle through the entire CPU context list for every MRQ */
1197 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1198 for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1199 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1200 infop++; /* next */
1203 kfree(phba->sli4_hba.nvmet_ctx_info);
1204 phba->sli4_hba.nvmet_ctx_info = NULL;
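/**
 * lpfc_nvmet_setup_io_context - Allocate per-XRI NVMET receive resources
 * @phba: Pointer to HBA context object.
 *
 * Allocates the per-CPU/MRQ context lists, then a context buffer, receive
 * context, iocbq, and sglq for every NVMET XRI, spreading the context
 * buffers evenly across the MRQ lists. Returns 0 on success or -ENOMEM.
 */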
1207 static int
1208 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1210 struct lpfc_nvmet_ctxbuf *ctx_buf;
1211 struct lpfc_iocbq *nvmewqe;
1212 union lpfc_wqe128 *wqe;
1213 struct lpfc_nvmet_ctx_info *last_infop;
1214 struct lpfc_nvmet_ctx_info *infop;
1215 int i, j, idx;
1217 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1218 "6403 Allocate NVMET resources for %d XRIs\n",
1219 phba->sli4_hba.nvmet_xri_cnt);
1221 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1222 phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1223 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1224 if (!phba->sli4_hba.nvmet_ctx_info) {
1225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1226 "6419 Failed allocate memory for "
1227 "nvmet context lists\n");
1228 return -ENOMEM;
1232 * Assuming X CPUs in the system, and Y MRQs, allocate some
1233 * lpfc_nvmet_ctx_info structures as follows:
1235 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1236 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1237 * ...
1238 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1240 * Each line represents a MRQ "silo" containing an entry for
1241 * every CPU.
1243 * MRQ X is initially assumed to be associated with CPU X, thus
1244 * contexts are initially distributed across all MRQs using
1245 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
1246 * freed, they are freed to the MRQ silo based on the CPU number
1247 * of the IO completion. Thus a context that was allocated for MRQ A
1248 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1250 infop = phba->sli4_hba.nvmet_ctx_info;
1251 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1252 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1253 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1254 spin_lock_init(&infop->nvmet_ctx_list_lock);
1255 infop->nvmet_ctx_list_cnt = 0;
1256 infop++;
1261 * Setup the next CPU context info ptr for each MRQ.
1262 * MRQ 0 will cycle thru CPUs 0 - X separately from
1263 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1265 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1266 last_infop = lpfc_get_ctx_list(phba, 0, j);
1267 for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
1268 infop = lpfc_get_ctx_list(phba, i, j);
1269 infop->nvmet_ctx_next_cpu = last_infop;
1270 last_infop = infop;
1274 /* For all nvmet xris, allocate resources needed to process a
1275 * received command on a per xri basis.
1277 idx = 0;
1278 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1279 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1280 if (!ctx_buf) {
1281 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1282 "6404 Ran out of memory for NVMET\n");
1283 return -ENOMEM;
1286 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1287 GFP_KERNEL);
1288 if (!ctx_buf->context) {
1289 kfree(ctx_buf);
1290 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1291 "6405 Ran out of NVMET "
1292 "context memory\n");
1293 return -ENOMEM;
1295 ctx_buf->context->ctxbuf = ctx_buf;
1296 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1298 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1299 if (!ctx_buf->iocbq) {
1300 kfree(ctx_buf->context);
1301 kfree(ctx_buf);
1302 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1303 "6406 Ran out of NVMET iocb/WQEs\n");
1304 return -ENOMEM;
1306 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1307 nvmewqe = ctx_buf->iocbq;
1308 wqe = &nvmewqe->wqe;
1310 /* Initialize WQE */
1311 memset(wqe, 0, sizeof(union lpfc_wqe));
1313 ctx_buf->iocbq->context1 = NULL;
1314 spin_lock(&phba->sli4_hba.sgl_list_lock);
1315 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1316 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1317 if (!ctx_buf->sglq) {
1318 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1319 kfree(ctx_buf->context);
1320 kfree(ctx_buf);
1321 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1322 "6407 Ran out of NVMET XRIs\n");
1323 return -ENOMEM;
1327 * Add ctx to MRQidx context list. Our initial assumption
1328 * is MRQidx will be associated with CPUidx. This association
1329 * can change on the fly.
1331 infop = lpfc_get_ctx_list(phba, idx, idx);
1332 spin_lock(&infop->nvmet_ctx_list_lock);
1333 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1334 infop->nvmet_ctx_list_cnt++;
1335 spin_unlock(&infop->nvmet_ctx_list_lock);
1337 /* Spread ctx structures evenly across all MRQs */
1338 idx++;
1339 if (idx >= phba->cfg_nvmet_mrq)
1340 idx = 0;
1343 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1344 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1345 infop = lpfc_get_ctx_list(phba, i, j);
1346 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1347 "6408 TOTAL NVMET ctx for CPU %d "
1348 "MRQ %d: cnt %d nextcpu %p\n",
1349 i, j, infop->nvmet_ctx_list_cnt,
1350 infop->nvmet_ctx_next_cpu);
1353 return 0;
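/**
 * lpfc_nvmet_create_targetport - Register this port with the NVME transport
 * @phba: Pointer to HBA context object.
 *
 * Sets up the NVMET IO contexts, fills in the port info and target
 * template, registers the targetport, and zeroes the statistics counters.
 * Returns 0 on success or a negative errno on failure.
 */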
1356 int
1357 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1359 struct lpfc_vport *vport = phba->pport;
1360 struct lpfc_nvmet_tgtport *tgtp;
1361 struct nvmet_fc_port_info pinfo;
1362 int error;
1364 if (phba->targetport)
1365 return 0;
1367 error = lpfc_nvmet_setup_io_context(phba);
1368 if (error)
1369 return error;
1371 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1372 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1373 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1374 pinfo.port_id = vport->fc_myDID;
1376 /* We need to tell the transport layer + 1 because it takes page
1377 * alignment into account. When space for the SGL is allocated we
1378 * allocate + 3, one for cmd, one for rsp and one for this alignment
1380 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1381 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1382 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1384 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1385 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1386 &phba->pcidev->dev,
1387 &phba->targetport);
1388 #else
1389 error = -ENOENT;
1390 #endif
1391 if (error) {
1392 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1393 "6025 Cannot register NVME targetport x%x: "
1394 "portnm %llx nodenm %llx segs %d qs %d\n",
1395 error,
1396 pinfo.port_name, pinfo.node_name,
1397 lpfc_tgttemplate.max_sgl_segments,
1398 lpfc_tgttemplate.max_hw_queues);
1399 phba->targetport = NULL;
1400 phba->nvmet_support = 0;
1402 lpfc_nvmet_cleanup_io_context(phba);
1404 } else {
1405 tgtp = (struct lpfc_nvmet_tgtport *)
1406 phba->targetport->private;
1407 tgtp->phba = phba;
1409 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1410 "6026 Registered NVME "
1411 "targetport: %p, private %p "
1412 "portnm %llx nodenm %llx segs %d qs %d\n",
1413 phba->targetport, tgtp,
1414 pinfo.port_name, pinfo.node_name,
1415 lpfc_tgttemplate.max_sgl_segments,
1416 lpfc_tgttemplate.max_hw_queues);
1418 atomic_set(&tgtp->rcv_ls_req_in, 0);
1419 atomic_set(&tgtp->rcv_ls_req_out, 0);
1420 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1421 atomic_set(&tgtp->xmt_ls_abort, 0);
1422 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1423 atomic_set(&tgtp->xmt_ls_rsp, 0);
1424 atomic_set(&tgtp->xmt_ls_drop, 0);
1425 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1426 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1427 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1428 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1429 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1430 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1431 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1432 atomic_set(&tgtp->xmt_fcp_drop, 0);
1433 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1434 atomic_set(&tgtp->xmt_fcp_read, 0);
1435 atomic_set(&tgtp->xmt_fcp_write, 0);
1436 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1437 atomic_set(&tgtp->xmt_fcp_release, 0);
1438 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1439 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1440 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1441 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1442 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1443 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1444 atomic_set(&tgtp->xmt_fcp_abort, 0);
1445 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1446 atomic_set(&tgtp->xmt_abort_unsol, 0);
1447 atomic_set(&tgtp->xmt_abort_sol, 0);
1448 atomic_set(&tgtp->xmt_abort_rsp, 0);
1449 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1450 atomic_set(&tgtp->defer_ctx, 0);
1451 atomic_set(&tgtp->defer_fod, 0);
1452 atomic_set(&tgtp->defer_wqfull, 0);
1454 return error;
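/**
 * lpfc_nvmet_update_targetport - Update the targetport's FC address
 * @phba: Pointer to HBA context object.
 *
 * Refreshes targetport->port_id from the vport's current DID. Returns 0.
 */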
1457 int
1458 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1460 struct lpfc_vport *vport = phba->pport;
1462 if (!phba->targetport)
1463 return 0;
1465 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1466 "6007 Update NVMET port %p did x%x\n",
1467 phba->targetport, vport->fc_myDID);
1469 phba->targetport->port_id = vport->fc_myDID;
1470 return 0;
1474 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1475 * @phba: pointer to lpfc hba data structure.
1476 * @axri: pointer to the nvmet xri abort wcqe structure.
1478 * This routine is invoked by the worker thread to process a SLI4 fast-path
1479 * NVMET aborted xri.
1481 void
1482 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1483 struct sli4_wcqe_xri_aborted *axri)
1485 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1486 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1487 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1488 struct lpfc_nvmet_tgtport *tgtp;
1489 struct lpfc_nodelist *ndlp;
1490 unsigned long iflag = 0;
1491 int rrq_empty = 0;
1492 bool released = false;
1494 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1495 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1497 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1498 return;
1500 if (phba->targetport) {
1501 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1502 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1505 spin_lock_irqsave(&phba->hbalock, iflag);
1506 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1507 list_for_each_entry_safe(ctxp, next_ctxp,
1508 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1509 list) {
1510 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1511 continue;
1513 /* Check if we already received a free context call
1514 * and we have completed processing an abort situation.
1516 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1517 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1518 list_del(&ctxp->list);
1519 released = true;
1521 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1522 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1524 rrq_empty = list_empty(&phba->active_rrq_list);
1525 spin_unlock_irqrestore(&phba->hbalock, iflag);
1526 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1527 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1528 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1529 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1530 lpfc_set_rrq_active(phba, ndlp,
1531 ctxp->ctxbuf->sglq->sli4_lxritag,
1532 rxid, 1);
1533 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1536 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1537 "6318 XB aborted oxid %x flg x%x (%x)\n",
1538 ctxp->oxid, ctxp->flag, released);
1539 if (released)
1540 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1542 if (rrq_empty)
1543 lpfc_worker_wake_up(phba);
1544 return;
1546 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1547 spin_unlock_irqrestore(&phba->hbalock, iflag);
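/**
 * lpfc_nvmet_rcv_unsol_abort - Process an unsolicited ABTS from an initiator
 * @vport: Pointer to the vport the ABTS was received on.
 * @fc_hdr: FC header of the ABTS frame.
 *
 * Looks up the exchange by OX_ID on the aborted-context list. If found,
 * the transport is notified via nvmet_fc_rcv_fcp_abort() and a BA_ACC is
 * sent; otherwise a BA_RJT is sent. Returns 0.
 */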
1550 int
1551 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1552 struct fc_frame_header *fc_hdr)
1555 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1556 struct lpfc_hba *phba = vport->phba;
1557 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1558 struct nvmefc_tgt_fcp_req *rsp;
1559 uint16_t xri;
1560 unsigned long iflag = 0;
1562 xri = be16_to_cpu(fc_hdr->fh_ox_id);
1564 spin_lock_irqsave(&phba->hbalock, iflag);
1565 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1566 list_for_each_entry_safe(ctxp, next_ctxp,
1567 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1568 list) {
1569 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1570 continue;
1572 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1573 spin_unlock_irqrestore(&phba->hbalock, iflag);
1575 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1576 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1577 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1579 lpfc_nvmeio_data(phba,
1580 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1581 xri, smp_processor_id(), 0);
1583 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1584 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1586 rsp = &ctxp->ctx.fcp_req;
1587 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1589 /* Respond with BA_ACC accordingly */
1590 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1591 return 0;
1593 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1594 spin_unlock_irqrestore(&phba->hbalock, iflag);
1596 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1597 xri, smp_processor_id(), 1);
1599 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1600 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1602 /* Respond with BA_RJT accordingly */
1603 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1604 #endif
1605 return 0;
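/**
 * lpfc_nvmet_wqfull_flush - Flush deferred WQEs off a WQ's wqfull_list
 * @phba: Pointer to HBA context object.
 * @wq: Work queue whose wqfull_list is to be flushed.
 * @ctxp: If non-NULL, flush only the WQE for this IO; otherwise flush all.
 *
 * Each flushed WQE is completed through lpfc_nvmet_xmt_fcp_op_cmp() with
 * a faked IOSTAT_LOCAL_REJECT / IOERR_ABORT_REQUESTED status.
 */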
1608 static void
1609 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1610 struct lpfc_nvmet_rcv_ctx *ctxp)
1612 struct lpfc_sli_ring *pring;
1613 struct lpfc_iocbq *nvmewqeq;
1614 struct lpfc_iocbq *next_nvmewqeq;
1615 unsigned long iflags;
1616 struct lpfc_wcqe_complete wcqe;
1617 struct lpfc_wcqe_complete *wcqep;
1619 pring = wq->pring;
1620 wcqep = &wcqe;
1622 /* Fake an ABORT error code back to cmpl routine */
1623 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1624 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1625 wcqep->parameter = IOERR_ABORT_REQUESTED;
1627 spin_lock_irqsave(&pring->ring_lock, iflags);
1628 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1629 &wq->wqfull_list, list) {
1630 if (ctxp) {
1631 /* Checking for a specific IO to flush */
1632 if (nvmewqeq->context2 == ctxp) {
1633 list_del(&nvmewqeq->list);
1634 spin_unlock_irqrestore(&pring->ring_lock,
1635 iflags);
1636 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1637 wcqep);
1638 return;
1640 continue;
1641 } else {
1642 /* Flush all IOs */
1643 list_del(&nvmewqeq->list);
1644 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1645 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1646 spin_lock_irqsave(&pring->ring_lock, iflags);
1649 if (!ctxp)
1650 wq->q_flag &= ~HBA_NVMET_WQFULL;
1651 spin_unlock_irqrestore(&pring->ring_lock, iflags);
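/**
 * lpfc_nvmet_wqfull_process - Re-issue WQEs deferred because the WQ was full
 * @phba: Pointer to HBA context object.
 * @wq: Work queue that now has free WQE slots.
 *
 * Drains the wqfull_list, re-issuing each deferred WQE. If the WQ fills
 * up again, the WQE is put back at the head and processing stops until
 * the next WQE release CQE.
 */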
1654 void
1655 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1656 struct lpfc_queue *wq)
1658 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1659 struct lpfc_sli_ring *pring;
1660 struct lpfc_iocbq *nvmewqeq;
1661 unsigned long iflags;
1662 int rc;
1665 * Some WQE slots are available, so try to re-issue anything
1666 * on the WQ wqfull_list.
1668 pring = wq->pring;
1669 spin_lock_irqsave(&pring->ring_lock, iflags);
1670 while (!list_empty(&wq->wqfull_list)) {
1671 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1672 list);
1673 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1674 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1675 spin_lock_irqsave(&pring->ring_lock, iflags);
1676 if (rc == -EBUSY) {
1677 /* WQ was full again, so put it back on the list */
1678 list_add(&nvmewqeq->list, &wq->wqfull_list);
1679 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1680 return;
1683 wq->q_flag &= ~HBA_NVMET_WQFULL;
1684 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1686 #endif
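/**
 * lpfc_nvmet_destroy_targetport - Unregister the NVMET targetport
 * @phba: Pointer to HBA context object.
 *
 * Flushes any WQEs still queued on the wqfull lists, unregisters the
 * targetport from the transport, waits (with timeout) for the delete
 * callback to complete, and frees the IO context resources.
 */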
1689 void
1690 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1692 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1693 struct lpfc_nvmet_tgtport *tgtp;
1694 struct lpfc_queue *wq;
1695 uint32_t qidx;
1696 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1698 if (phba->nvmet_support == 0)
1699 return;
1700 if (phba->targetport) {
1701 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1702 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
1703 wq = phba->sli4_hba.nvme_wq[qidx];
1704 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1706 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1707 nvmet_fc_unregister_targetport(phba->targetport);
1708 wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(5000));
1709 lpfc_nvmet_cleanup_io_context(phba);
1711 phba->targetport = NULL;
1712 #endif
1716 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1717 * @phba: pointer to lpfc hba data structure.
1718 * @pring: pointer to a SLI ring.
1719 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1721 * This routine processes an unsolicited NVME LS request received on the
1722 * ELS ring. It allocates a receive context for the exchange and passes
1723 * the LS payload to the NVME transport via nvmet_fc_rcv_ls_req(). If the
1724 * request cannot be delivered, the receive buffer is freed and the
1725 * exchange is aborted.
1728 static void
1729 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1730 struct hbq_dmabuf *nvmebuf)
1732 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1733 struct lpfc_nvmet_tgtport *tgtp;
1734 struct fc_frame_header *fc_hdr;
1735 struct lpfc_nvmet_rcv_ctx *ctxp;
1736 uint32_t *payload;
1737 uint32_t size, oxid, sid, rc;
1739 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1740 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1742 if (!phba->targetport) {
1743 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1744 "6154 LS Drop IO x%x\n", oxid);
1745 oxid = 0;
1746 size = 0;
1747 sid = 0;
1748 ctxp = NULL;
1749 goto dropit;
1752 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1753 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1754 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1755 sid = sli4_sid_from_fc_hdr(fc_hdr);
1757 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1758 if (ctxp == NULL) {
1759 atomic_inc(&tgtp->rcv_ls_req_drop);
1760 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1761 "6155 LS Drop IO x%x: Alloc\n",
1762 oxid);
1763 dropit:
1764 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1765 "xri x%x sz %d from %06x\n",
1766 oxid, size, sid);
1767 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1768 return;
1770 ctxp->phba = phba;
1771 ctxp->size = size;
1772 ctxp->oxid = oxid;
1773 ctxp->sid = sid;
1774 ctxp->wqeq = NULL;
1775 ctxp->state = LPFC_NVMET_STE_LS_RCV;
1776 ctxp->entry_cnt = 1;
1777 ctxp->rqb_buffer = (void *)nvmebuf;
1779 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
1780 oxid, size, sid);
1782 * The calling sequence should be:
1783 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
1784 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1786 atomic_inc(&tgtp->rcv_ls_req_in);
1787 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1788 payload, size);
1790 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1791 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1792 "%08x %08x %08x\n", size, rc,
1793 *payload, *(payload+1), *(payload+2),
1794 *(payload+3), *(payload+4), *(payload+5));
1796 if (rc == 0) {
1797 atomic_inc(&tgtp->rcv_ls_req_out);
1798 return;
1801 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
1802 oxid, size, sid);
1804 atomic_inc(&tgtp->rcv_ls_req_drop);
1805 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1806 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1807 ctxp->oxid, rc);
1809 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1810 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1812 atomic_inc(&tgtp->xmt_ls_abort);
1813 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1814 #endif
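/**
 * lpfc_nvmet_replenish_context - Refill an empty MRQ context list
 * @phba: Pointer to HBA context object.
 * @current_infop: The empty per-CPU context list for this MRQ.
 *
 * Splices the entire context list from another CPU's silo for the same
 * MRQ onto the current list, remembers that CPU as the next starting
 * point, and returns one context buffer. Returns NULL if every CPU's
 * list for this MRQ is empty (all contexts are in-flight).
 */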
1817 static struct lpfc_nvmet_ctxbuf *
1818 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1819 struct lpfc_nvmet_ctx_info *current_infop)
1821 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1822 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1823 struct lpfc_nvmet_ctx_info *get_infop;
1824 int i;
1827 * The current_infop for the MRQ an NVME command IU was received
1828 * on is empty. Our goal is to replenish this MRQ's context
1829 * list from another CPU's list.
1831 * First we need to pick a context list to start looking on:
1832 * nvmet_ctx_start_cpu is the list that had contexts available
1833 * the last time we needed to replenish this CPU, while
1834 * nvmet_ctx_next_cpu is just the next sequential CPU for this MRQ.
1836 if (current_infop->nvmet_ctx_start_cpu)
1837 get_infop = current_infop->nvmet_ctx_start_cpu;
1838 else
1839 get_infop = current_infop->nvmet_ctx_next_cpu;
1841 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1842 if (get_infop == current_infop) {
1843 get_infop = get_infop->nvmet_ctx_next_cpu;
1844 continue;
1845 }
1846 spin_lock(&get_infop->nvmet_ctx_list_lock);
1848 /* Just take the entire context list, if there are any */
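/* (Note: the count set below is one less than what was spliced over,
 * because the first ctx_buf is immediately removed from the list and
 * handed back to the caller.)
 */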
1849 if (get_infop->nvmet_ctx_list_cnt) {
1850 list_splice_init(&get_infop->nvmet_ctx_list,
1851 &current_infop->nvmet_ctx_list);
1852 current_infop->nvmet_ctx_list_cnt =
1853 get_infop->nvmet_ctx_list_cnt - 1;
1854 get_infop->nvmet_ctx_list_cnt = 0;
1855 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1857 current_infop->nvmet_ctx_start_cpu = get_infop;
1858 list_remove_head(&current_infop->nvmet_ctx_list,
1859 ctx_buf, struct lpfc_nvmet_ctxbuf,
1860 list);
1861 return ctx_buf;
1862 }
1864 /* Otherwise, move on to the next CPU for this MRQ */
1865 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1866 get_infop = get_infop->nvmet_ctx_next_cpu;
1867 }
1869 #endif
1870 /* Nothing found, all contexts for the MRQ are in-flight */
1871 return NULL;
1872 }
1874 /**
1875 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited FCP command buffer
1876 * @phba: pointer to lpfc hba data structure.
1877 * @idx: relative index of MRQ vector
1878 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1879 *
1880 * This routine processes an unsolicited NVME FCP command IU received on
1881 * one of the MRQs. It pulls a free receive context from the per-CPU
1882 * context list, replenishing that list from another CPU when it is empty,
1883 * initializes the context from the FC header, and passes the command
1884 * payload to the nvmet transport via nvmet_fc_rcv_fcp_req(). When no
1885 * context is available, the I/O is queued for deferred processing.
1886 */
1887 static void
1888 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1889 uint32_t idx,
1890 struct rqb_dmabuf *nvmebuf,
1891 uint64_t isr_timestamp)
1892 {
1893 struct lpfc_nvmet_rcv_ctx *ctxp;
1894 struct lpfc_nvmet_tgtport *tgtp;
1895 struct fc_frame_header *fc_hdr;
1896 struct lpfc_nvmet_ctxbuf *ctx_buf;
1897 struct lpfc_nvmet_ctx_info *current_infop;
1898 uint32_t *payload;
1899 uint32_t size, oxid, sid, rc, qno;
1900 unsigned long iflag;
1901 int current_cpu;
1902 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1903 uint32_t id;
1904 #endif
1906 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1907 return;
1909 ctx_buf = NULL;
1910 if (!nvmebuf || !phba->targetport) {
1911 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1912 "6157 NVMET FCP Drop IO\n");
1913 oxid = 0;
1914 size = 0;
1915 sid = 0;
1916 ctxp = NULL;
1917 goto dropit;
1918 }
1920 /*
1921 * Get a pointer to the context list for this MRQ based on
1922 * the CPU this MRQ IRQ is associated with. If the CPU association
1923 * changes from our initial assumption, the context list could
1924 * be empty, thus it would need to be replenished with the
1925 * context list from another CPU for this MRQ.
1926 */
1927 current_cpu = smp_processor_id();
1928 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1929 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1930 if (current_infop->nvmet_ctx_list_cnt) {
1931 list_remove_head(&current_infop->nvmet_ctx_list,
1932 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1933 current_infop->nvmet_ctx_list_cnt--;
1934 } else {
1935 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1936 }
1937 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1939 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1940 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1941 size = nvmebuf->bytes_recv;
1943 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1944 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1945 id = smp_processor_id();
1946 if (id < LPFC_CHECK_CPU_CNT)
1947 phba->cpucheck_rcv_io[id]++;
1948 }
1949 #endif
1951 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1952 oxid, size, smp_processor_id());
1954 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1956 if (!ctx_buf) {
1957 /* Queue this NVME IO to process later */
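/* (Deferred I/Os on this wait list are picked back up once receive
 * contexts are freed; see lpfc_nvmet_ctxbuf_post().)
 */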
1958 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1959 list_add_tail(&nvmebuf->hbuf.list,
1960 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1961 phba->sli4_hba.nvmet_io_wait_cnt++;
1962 phba->sli4_hba.nvmet_io_wait_total++;
1963 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1964 iflag);
1966 /* Post a brand new DMA buffer to RQ */
1967 qno = nvmebuf->idx;
1968 lpfc_post_rq_buffer(
1969 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1970 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1972 atomic_inc(&tgtp->defer_ctx);
1973 return;
1974 }
1976 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1977 sid = sli4_sid_from_fc_hdr(fc_hdr);
1979 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1980 if (ctxp->state != LPFC_NVMET_STE_FREE) {
1981 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1982 "6414 NVMET Context corrupt %d %d oxid x%x\n",
1983 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1984 }
1985 ctxp->wqeq = NULL;
1986 ctxp->txrdy = NULL;
1987 ctxp->offset = 0;
1988 ctxp->phba = phba;
1989 ctxp->size = size;
1990 ctxp->oxid = oxid;
1991 ctxp->sid = sid;
1992 ctxp->idx = idx;
1993 ctxp->state = LPFC_NVMET_STE_RCV;
1994 ctxp->entry_cnt = 1;
1995 ctxp->flag = 0;
1996 ctxp->ctxbuf = ctx_buf;
1997 ctxp->rqb_buffer = (void *)nvmebuf;
1998 spin_lock_init(&ctxp->ctxlock);
2000 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2001 if (isr_timestamp) {
2002 ctxp->ts_isr_cmd = isr_timestamp;
2003 ctxp->ts_cmd_nvme = ktime_get_ns();
2004 ctxp->ts_nvme_data = 0;
2005 ctxp->ts_data_wqput = 0;
2006 ctxp->ts_isr_data = 0;
2007 ctxp->ts_data_nvme = 0;
2008 ctxp->ts_nvme_status = 0;
2009 ctxp->ts_status_wqput = 0;
2010 ctxp->ts_isr_status = 0;
2011 ctxp->ts_status_nvme = 0;
2012 } else {
2013 ctxp->ts_cmd_nvme = 0;
2014 }
2015 #endif
2017 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2018 /*
2019 * The calling sequence should be:
2020 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2021 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2022 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2023 * the NVME command / FC header is stored, so we are free to repost
2024 * the buffer.
2025 */
2026 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2027 payload, size);
2029 /* Process FCP command */
2030 if (rc == 0) {
2031 ctxp->rqb_buffer = NULL;
2032 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2033 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2034 return;
2035 }
2037 /* Processing of FCP command is deferred */
2038 if (rc == -EOVERFLOW) {
2039 /*
2040 * Post a brand new DMA buffer to RQ and defer
2041 * freeing rcv buffer till .defer_rcv callback
2042 */
2043 qno = nvmebuf->idx;
2044 lpfc_post_rq_buffer(
2045 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2046 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2048 lpfc_nvmeio_data(phba,
2049 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
2050 oxid, size, sid);
2051 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2052 atomic_inc(&tgtp->defer_fod);
2053 return;
2054 }
2055 ctxp->rqb_buffer = nvmebuf;
2057 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2058 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2059 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2060 ctxp->oxid, rc,
2061 atomic_read(&tgtp->rcv_fcp_cmd_in),
2062 atomic_read(&tgtp->rcv_fcp_cmd_out),
2063 atomic_read(&tgtp->xmt_fcp_release));
2064 dropit:
2065 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2066 oxid, size, sid);
2067 if (oxid) {
2068 lpfc_nvmet_defer_release(phba, ctxp);
2069 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2070 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2071 return;
2072 }
2074 if (ctx_buf)
2075 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
2077 if (nvmebuf)
2078 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2079 }
2081 /**
2082 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2083 * @phba: pointer to lpfc hba data structure.
2084 * @pring: pointer to a SLI ring.
2085 * @nvmebuf: pointer to received nvme data structure.
2086 *
2087 * This routine is used to process an unsolicited event received from a SLI
2088 * (Service Level Interface) ring. The actual processing of the data buffer
2089 * associated with the unsolicited event is done by invoking the routine
2090 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2091 * SLI RQ on which the unsolicited event was received.
2092 */
2093 void
2094 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2095 struct lpfc_iocbq *piocb)
2096 {
2097 struct lpfc_dmabuf *d_buf;
2098 struct hbq_dmabuf *nvmebuf;
2100 d_buf = piocb->context2;
2101 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2103 if (phba->nvmet_support == 0) {
2104 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2105 return;
2106 }
2107 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2108 }
2110 /**
2111 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2112 * @phba: pointer to lpfc hba data structure.
2113 * @idx: relative index of MRQ vector
2114 * @nvmebuf: pointer to received nvme data structure.
2115 *
2116 * This routine is used to process an unsolicited event received from a SLI
2117 * (Service Level Interface) ring. The actual processing of the data buffer
2118 * associated with the unsolicited event is done by invoking the routine
2119 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2120 * SLI RQ on which the unsolicited event was received.
2121 */
2122 void
2123 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2124 uint32_t idx,
2125 struct rqb_dmabuf *nvmebuf,
2126 uint64_t isr_timestamp)
2127 {
2128 if (phba->nvmet_support == 0) {
2129 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2130 return;
2131 }
2132 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2133 isr_timestamp);
2134 }
2136 /**
2137 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2138 * @phba: pointer to a host N_Port data structure.
2139 * @ctxp: Context info for NVME LS Request
2140 * @rspbuf: DMA address of the NVME LS response buffer.
2141 * @rspsize: size of the NVME LS response.
2142 *
2143 * This routine allocates a WQE data structure (iocbq) from the driver
2144 * free-list and prepares an XMIT_SEQUENCE WQE to transmit the NVME LS
2145 * response described by @rspbuf and @rspsize back to the initiator.
2146 * It looks up the ndlp for the source N_Port of the LS request, fills
2147 * in the Buffer Descriptor Entry (BDE) for the response payload, and
2148 * sets up the exchange, RPI, and request-tag fields of the WQE. The
2149 * reference count on the ndlp is incremented by 1 and the reference
2150 * to the ndlp is put into context1 of the WQE data structure for this
2151 * WQE to hold the ndlp reference for the command's callback function
2152 * to access later.
2153 *
2157 * Return code
2158 * Pointer to the newly allocated/prepared nvme wqe data structure
2159 * NULL - when nvme wqe data structure allocation/preparation failed
2160 */
2161 static struct lpfc_iocbq *
2162 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2163 struct lpfc_nvmet_rcv_ctx *ctxp,
2164 dma_addr_t rspbuf, uint16_t rspsize)
2165 {
2166 struct lpfc_nodelist *ndlp;
2167 struct lpfc_iocbq *nvmewqe;
2168 union lpfc_wqe128 *wqe;
2170 if (!lpfc_is_link_up(phba)) {
2171 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2172 "6104 NVMET prep LS wqe: link err: "
2173 "NPORT x%x oxid:x%x ste %d\n",
2174 ctxp->sid, ctxp->oxid, ctxp->state);
2175 return NULL;
2176 }
2178 /* Allocate buffer for command wqe */
2179 nvmewqe = lpfc_sli_get_iocbq(phba);
2180 if (nvmewqe == NULL) {
2181 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2182 "6105 NVMET prep LS wqe: No WQE: "
2183 "NPORT x%x oxid x%x ste %d\n",
2184 ctxp->sid, ctxp->oxid, ctxp->state);
2185 return NULL;
2186 }
2188 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2189 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2190 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2191 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2192 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2193 "6106 NVMET prep LS wqe: No ndlp: "
2194 "NPORT x%x oxid x%x ste %d\n",
2195 ctxp->sid, ctxp->oxid, ctxp->state);
2196 goto nvme_wqe_free_wqeq_exit;
2197 }
2198 ctxp->wqeq = nvmewqe;
2200 /* prevent preparing wqe with NULL ndlp reference */
2201 nvmewqe->context1 = lpfc_nlp_get(ndlp);
2202 if (nvmewqe->context1 == NULL)
2203 goto nvme_wqe_free_wqeq_exit;
2204 nvmewqe->context2 = ctxp;
2206 wqe = &nvmewqe->wqe;
2207 memset(wqe, 0, sizeof(union lpfc_wqe));
2209 /* Words 0 - 2 */
2210 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2211 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2212 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2213 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2215 /* Word 3 */
2217 /* Word 4 */
2219 /* Word 5 */
2220 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2221 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2222 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2223 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2224 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2226 /* Word 6 */
2227 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2228 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2229 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2231 /* Word 7 */
2232 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2233 CMD_XMIT_SEQUENCE64_WQE);
2234 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2235 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2236 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2238 /* Word 8 */
2239 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2241 /* Word 9 */
2242 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2243 /* Needs to be set by caller */
2244 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2246 /* Word 10 */
2247 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2248 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2249 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2250 LPFC_WQE_LENLOC_WORD12);
2251 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2253 /* Word 11 */
2254 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2255 LPFC_WQE_CQ_ID_DEFAULT);
2256 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2257 OTHER_COMMAND);
2259 /* Word 12 */
2260 wqe->xmit_sequence.xmit_len = rspsize;
2262 nvmewqe->retry = 1;
2263 nvmewqe->vport = phba->pport;
2264 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2265 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2267 /* Xmit NVMET response to remote NPORT <did> */
2268 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2269 "6039 Xmit NVMET LS response to remote "
2270 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2271 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2272 rspsize);
2273 return nvmewqe;
2275 nvme_wqe_free_wqeq_exit:
2276 nvmewqe->context2 = NULL;
2277 nvmewqe->context3 = NULL;
2278 lpfc_sli_release_iocbq(phba, nvmewqe);
2279 return NULL;
2280 }
2283 static struct lpfc_iocbq *
2284 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2285 struct lpfc_nvmet_rcv_ctx *ctxp)
2286 {
2287 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2288 struct lpfc_nvmet_tgtport *tgtp;
2289 struct sli4_sge *sgl;
2290 struct lpfc_nodelist *ndlp;
2291 struct lpfc_iocbq *nvmewqe;
2292 struct scatterlist *sgel;
2293 union lpfc_wqe128 *wqe;
2294 struct ulp_bde64 *bde;
2295 uint32_t *txrdy;
2296 dma_addr_t physaddr;
2297 int i, cnt;
2298 int do_pbde;
2299 int xc = 1;
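/* xc tracks whether this exchange already has a WQE posted; it is
 * cleared below the first time a WQE is built for the exchange (see
 * "create new XRI") and steers the wqe_xc bit set on each command.
 */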
2301 if (!lpfc_is_link_up(phba)) {
2302 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2303 "6107 NVMET prep FCP wqe: link err:"
2304 "NPORT x%x oxid x%x ste %d\n",
2305 ctxp->sid, ctxp->oxid, ctxp->state);
2306 return NULL;
2307 }
2309 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2310 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2311 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2312 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2313 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2314 "6108 NVMET prep FCP wqe: no ndlp: "
2315 "NPORT x%x oxid x%x ste %d\n",
2316 ctxp->sid, ctxp->oxid, ctxp->state);
2317 return NULL;
2318 }
2320 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2321 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2322 "6109 NVMET prep FCP wqe: seg cnt err: "
2323 "NPORT x%x oxid x%x ste %d cnt %d\n",
2324 ctxp->sid, ctxp->oxid, ctxp->state,
2325 phba->cfg_nvme_seg_cnt);
2326 return NULL;
2327 }
2329 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2330 nvmewqe = ctxp->wqeq;
2331 if (nvmewqe == NULL) {
2332 /* Allocate buffer for command wqe */
2333 nvmewqe = ctxp->ctxbuf->iocbq;
2334 if (nvmewqe == NULL) {
2335 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2336 "6110 NVMET prep FCP wqe: No "
2337 "WQE: NPORT x%x oxid x%x ste %d\n",
2338 ctxp->sid, ctxp->oxid, ctxp->state);
2339 return NULL;
2340 }
2341 ctxp->wqeq = nvmewqe;
2342 xc = 0; /* create new XRI */
2343 nvmewqe->sli4_lxritag = NO_XRI;
2344 nvmewqe->sli4_xritag = NO_XRI;
2345 }
2347 /* Sanity check */
2348 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2349 (ctxp->entry_cnt == 1)) ||
2350 (ctxp->state == LPFC_NVMET_STE_DATA)) {
2351 wqe = &nvmewqe->wqe;
2352 } else {
2353 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2354 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2355 ctxp->state, ctxp->entry_cnt);
2356 return NULL;
2357 }
2359 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2360 switch (rsp->op) {
2361 case NVMET_FCOP_READDATA:
2362 case NVMET_FCOP_READDATA_RSP:
2363 /* From the tsend template, initialize words 7 - 11 */
2364 memcpy(&wqe->words[7],
2365 &lpfc_tsend_cmd_template.words[7],
2366 sizeof(uint32_t) * 5);
2368 /* Words 0 - 2 : The first sg segment */
2369 sgel = &rsp->sg[0];
2370 physaddr = sg_dma_address(sgel);
2371 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2372 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2373 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2374 wqe->fcp_tsend.bde.addrHigh =
2375 cpu_to_le32(putPaddrHigh(physaddr));
2377 /* Word 3 */
2378 wqe->fcp_tsend.payload_offset_len = 0;
2380 /* Word 4 */
2381 wqe->fcp_tsend.relative_offset = ctxp->offset;
2383 /* Word 5 */
2384 wqe->fcp_tsend.reserved = 0;
2386 /* Word 6 */
2387 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2388 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2389 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2390 nvmewqe->sli4_xritag);
2392 /* Word 7 - set ar later */
2394 /* Word 8 */
2395 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2397 /* Word 9 */
2398 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2399 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2401 /* Word 10 - set wqes later, in template xc=1 */
2402 if (!xc)
2403 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2405 /* Word 11 - set sup, irsp, irsplen later */
2406 do_pbde = 0;
2408 /* Word 12 */
2409 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2411 /* Setup 2 SKIP SGEs */
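/* (The two SKIP entries below are placeholders occupying the first
 * two SGL slots, which TSEND does not use for data; SKIP-type SGEs
 * are ignored, so the data SGEs filled in later start at slot 2.)
 */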
2412 sgl->addr_hi = 0;
2413 sgl->addr_lo = 0;
2414 sgl->word2 = 0;
2415 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2416 sgl->word2 = cpu_to_le32(sgl->word2);
2417 sgl->sge_len = 0;
2418 sgl++;
2419 sgl->addr_hi = 0;
2420 sgl->addr_lo = 0;
2421 sgl->word2 = 0;
2422 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2423 sgl->word2 = cpu_to_le32(sgl->word2);
2424 sgl->sge_len = 0;
2425 sgl++;
2426 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2427 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2429 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
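/* (A good-status response is exactly LPFC_NVMET_SUCCESS_LEN bytes and
 * can be generated from the template, or suppressed entirely if the
 * node negotiated it; anything longer is embedded in the WQE below.)
 */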
2431 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2432 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2433 bf_set(wqe_sup,
2434 &wqe->fcp_tsend.wqe_com, 1);
2435 } else {
2436 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2437 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2438 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2439 ((rsp->rsplen >> 2) - 1));
2440 memcpy(&wqe->words[16], rsp->rspaddr,
2441 rsp->rsplen);
2442 }
2443 } else {
2444 atomic_inc(&tgtp->xmt_fcp_read);
2446 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2447 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2448 }
2449 break;
2451 case NVMET_FCOP_WRITEDATA:
2452 /* From the treceive template, initialize words 3 - 11 */
2453 memcpy(&wqe->words[3],
2454 &lpfc_treceive_cmd_template.words[3],
2455 sizeof(uint32_t) * 9);
2457 /* Words 0 - 2 : The first sg segment */
2458 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2459 GFP_KERNEL, &physaddr);
2460 if (!txrdy) {
2461 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2462 "6041 Bad txrdy buffer: oxid x%x\n",
2463 ctxp->oxid);
2464 return NULL;
2465 }
2466 ctxp->txrdy = txrdy;
2467 ctxp->txrdy_phys = physaddr;
2468 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2469 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2470 wqe->fcp_treceive.bde.addrLow =
2471 cpu_to_le32(putPaddrLow(physaddr));
2472 wqe->fcp_treceive.bde.addrHigh =
2473 cpu_to_le32(putPaddrHigh(physaddr));
2475 /* Word 4 */
2476 wqe->fcp_treceive.relative_offset = ctxp->offset;
2478 /* Word 6 */
2479 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2480 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2481 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2482 nvmewqe->sli4_xritag);
2484 /* Word 7 */
2486 /* Word 8 */
2487 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2489 /* Word 9 */
2490 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2491 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2493 /* Word 10 - in template xc=1 */
2494 if (!xc)
2495 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2497 /* Word 11 - set pbde later */
2498 if (phba->cfg_enable_pbde) {
2499 do_pbde = 1;
2500 } else {
2501 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2502 do_pbde = 0;
2503 }
2505 /* Word 12 */
2506 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2508 /* Setup 1 TXRDY and 1 SKIP SGE */
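/* (The transfer-ready payload built below is three words: word 1
 * carries the burst length in big-endian, words 0 and 2 stay zero.)
 */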
2509 txrdy[0] = 0;
2510 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2511 txrdy[2] = 0;
2513 sgl->addr_hi = putPaddrHigh(physaddr);
2514 sgl->addr_lo = putPaddrLow(physaddr);
2515 sgl->word2 = 0;
2516 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2517 sgl->word2 = cpu_to_le32(sgl->word2);
2518 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2519 sgl++;
2520 sgl->addr_hi = 0;
2521 sgl->addr_lo = 0;
2522 sgl->word2 = 0;
2523 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2524 sgl->word2 = cpu_to_le32(sgl->word2);
2525 sgl->sge_len = 0;
2526 sgl++;
2527 atomic_inc(&tgtp->xmt_fcp_write);
2528 break;
2530 case NVMET_FCOP_RSP:
2531 /* From the treceive template, initialize words 4 - 11 */
2532 memcpy(&wqe->words[4],
2533 &lpfc_trsp_cmd_template.words[4],
2534 sizeof(uint32_t) * 8);
2536 /* Words 0 - 2 */
2537 physaddr = rsp->rspdma;
2538 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2539 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2540 wqe->fcp_trsp.bde.addrLow =
2541 cpu_to_le32(putPaddrLow(physaddr));
2542 wqe->fcp_trsp.bde.addrHigh =
2543 cpu_to_le32(putPaddrHigh(physaddr));
2545 /* Word 3 */
2546 wqe->fcp_trsp.response_len = rsp->rsplen;
2548 /* Word 6 */
2549 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2550 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2551 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2552 nvmewqe->sli4_xritag);
2554 /* Word 7 */
2556 /* Word 8 */
2557 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2559 /* Word 9 */
2560 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2561 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2563 /* Word 10 */
2564 if (xc)
2565 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2567 /* Word 11 */
2568 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2569 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2570 /* Bad response - embed it */
2571 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2572 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2573 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2574 ((rsp->rsplen >> 2) - 1));
2575 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2576 }
2577 do_pbde = 0;
2579 /* Word 12 */
2580 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2582 /* Use rspbuf, NOT sg list */
2583 rsp->sg_cnt = 0;
2584 sgl->word2 = 0;
2585 atomic_inc(&tgtp->xmt_fcp_rsp);
2586 break;
2588 default:
2589 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2590 "6064 Unknown Rsp Op %d\n",
2591 rsp->op);
2592 return NULL;
2593 }
2595 nvmewqe->retry = 1;
2596 nvmewqe->vport = phba->pport;
2597 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2598 nvmewqe->context1 = ndlp;
2600 for (i = 0; i < rsp->sg_cnt; i++) {
2601 sgel = &rsp->sg[i];
2602 physaddr = sg_dma_address(sgel);
2603 cnt = sg_dma_len(sgel);
2604 sgl->addr_hi = putPaddrHigh(physaddr);
2605 sgl->addr_lo = putPaddrLow(physaddr);
2606 sgl->word2 = 0;
2607 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2608 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2609 if ((i+1) == rsp->sg_cnt)
2610 bf_set(lpfc_sli4_sge_last, sgl, 1);
2611 sgl->word2 = cpu_to_le32(sgl->word2);
2612 sgl->sge_len = cpu_to_le32(cnt);
2613 if (i == 0) {
2614 bde = (struct ulp_bde64 *)&wqe->words[13];
2615 if (do_pbde) {
2616 /* Words 13-15 (PBDE) */
2617 bde->addrLow = sgl->addr_lo;
2618 bde->addrHigh = sgl->addr_hi;
2619 bde->tus.f.bdeSize =
2620 le32_to_cpu(sgl->sge_len);
2621 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2622 bde->tus.w = cpu_to_le32(bde->tus.w);
2623 } else {
2624 memset(bde, 0, sizeof(struct ulp_bde64));
2625 }
2626 }
2627 sgl++;
2628 ctxp->offset += cnt;
2629 }
2630 ctxp->state = LPFC_NVMET_STE_DATA;
2631 ctxp->entry_cnt++;
2632 return nvmewqe;
2633 }
2635 /**
2636 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2637 * @phba: Pointer to HBA context object.
2638 * @cmdwqe: Pointer to driver command WQE object.
2639 * @wcqe: Pointer to driver response CQE object.
2640 *
2641 * The function is called from SLI ring event handler with no
2642 * lock held. This function is the completion handler for NVME ABTS for
2643 * FCP cmds. The function frees memory resources used for the NVME commands.
2644 */
2645 static void
2646 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2647 struct lpfc_wcqe_complete *wcqe)
2648 {
2649 struct lpfc_nvmet_rcv_ctx *ctxp;
2650 struct lpfc_nvmet_tgtport *tgtp;
2651 uint32_t status, result;
2652 unsigned long flags;
2653 bool released = false;
2655 ctxp = cmdwqe->context2;
2656 status = bf_get(lpfc_wcqe_c_status, wcqe);
2657 result = wcqe->parameter;
2659 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2660 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2661 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2663 ctxp->state = LPFC_NVMET_STE_DONE;
2665 /* Check if we already received a free context call
2666 * and we have completed processing an abort situation.
2667 */
2668 spin_lock_irqsave(&ctxp->ctxlock, flags);
2669 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2670 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2671 list_del(&ctxp->list);
2672 released = true;
2673 }
2674 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2675 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2676 atomic_inc(&tgtp->xmt_abort_rsp);
2678 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2679 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2680 "WCQE: %08x %08x %08x %08x\n",
2681 ctxp->oxid, ctxp->flag, released,
2682 wcqe->word0, wcqe->total_data_placed,
2683 result, wcqe->word3);
2685 cmdwqe->context2 = NULL;
2686 cmdwqe->context3 = NULL;
2687 /*
2688 * if transport has released ctx, then can reuse it. Otherwise,
2689 * will be recycled by transport release call.
2690 */
2691 if (released)
2692 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2694 /* This is the iocbq for the abort, not the command */
2695 lpfc_sli_release_iocbq(phba, cmdwqe);
2697 /* Since iaab/iaar are NOT set, there is no work left.
2698 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2699 * should have been called already.
2700 */
2701 }
2703 /**
2704 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2705 * @phba: Pointer to HBA context object.
2706 * @cmdwqe: Pointer to driver command WQE object.
2707 * @wcqe: Pointer to driver response CQE object.
2708 *
2709 * The function is called from SLI ring event handler with no
2710 * lock held. This function is the completion handler for NVME ABTS for
2711 * FCP cmds. The function frees memory resources used for the NVME commands.
2712 */
2713 static void
2714 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2715 struct lpfc_wcqe_complete *wcqe)
2716 {
2717 struct lpfc_nvmet_rcv_ctx *ctxp;
2718 struct lpfc_nvmet_tgtport *tgtp;
2719 unsigned long flags;
2720 uint32_t status, result;
2721 bool released = false;
2723 ctxp = cmdwqe->context2;
2724 status = bf_get(lpfc_wcqe_c_status, wcqe);
2725 result = wcqe->parameter;
2727 if (!ctxp) {
2728 /* if context is clear, related I/O already completed */
2729 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2730 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2731 wcqe->word0, wcqe->total_data_placed,
2732 result, wcqe->word3);
2733 return;
2734 }
2736 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2737 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2738 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2740 /* Sanity check */
2741 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2742 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2743 "6112 ABTS Wrong state:%d oxid x%x\n",
2744 ctxp->state, ctxp->oxid);
2745 }
2747 /* Check if we already received a free context call
2748 * and we have completed processing an abort situation.
2749 */
2750 ctxp->state = LPFC_NVMET_STE_DONE;
2751 spin_lock_irqsave(&ctxp->ctxlock, flags);
2752 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2753 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2754 list_del(&ctxp->list);
2755 released = true;
2756 }
2757 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2758 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2759 atomic_inc(&tgtp->xmt_abort_rsp);
2761 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2762 "6316 ABTS cmpl xri x%x flg x%x (%x) "
2763 "WCQE: %08x %08x %08x %08x\n",
2764 ctxp->oxid, ctxp->flag, released,
2765 wcqe->word0, wcqe->total_data_placed,
2766 result, wcqe->word3);
2768 cmdwqe->context2 = NULL;
2769 cmdwqe->context3 = NULL;
2770 /*
2771 * if transport has released ctx, then can reuse it. Otherwise,
2772 * will be recycled by transport release call.
2773 */
2774 if (released)
2775 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2777 /* Since iaab/iaar are NOT set, there is no work left.
2778 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2779 * should have been called already.
2780 */
2781 }
2783 /**
2784 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2785 * @phba: Pointer to HBA context object.
2786 * @cmdwqe: Pointer to driver command WQE object.
2787 * @wcqe: Pointer to driver response CQE object.
2788 *
2789 * The function is called from SLI ring event handler with no
2790 * lock held. This function is the completion handler for NVME ABTS for
2791 * LS cmds. The function frees memory resources used for the NVME commands.
2792 */
2793 static void
2794 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2795 struct lpfc_wcqe_complete *wcqe)
2796 {
2797 struct lpfc_nvmet_rcv_ctx *ctxp;
2798 struct lpfc_nvmet_tgtport *tgtp;
2799 uint32_t status, result;
2801 ctxp = cmdwqe->context2;
2802 status = bf_get(lpfc_wcqe_c_status, wcqe);
2803 result = wcqe->parameter;
2805 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2806 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2808 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2809 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2810 ctxp, wcqe->word0, wcqe->total_data_placed,
2811 result, wcqe->word3);
2813 if (!ctxp) {
2814 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2815 "6415 NVMET LS Abort No ctx: WCQE: "
2816 "%08x %08x %08x %08x\n",
2817 wcqe->word0, wcqe->total_data_placed,
2818 result, wcqe->word3);
2820 lpfc_sli_release_iocbq(phba, cmdwqe);
2821 return;
2822 }
2824 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2825 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2826 "6416 NVMET LS abort cmpl state mismatch: "
2827 "oxid x%x: %d %d\n",
2828 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2829 }
2831 cmdwqe->context2 = NULL;
2832 cmdwqe->context3 = NULL;
2833 lpfc_sli_release_iocbq(phba, cmdwqe);
2834 kfree(ctxp);
2835 }
2837 static int
2838 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2839 struct lpfc_nvmet_rcv_ctx *ctxp,
2840 uint32_t sid, uint16_t xri)
2841 {
2842 struct lpfc_nvmet_tgtport *tgtp;
2843 struct lpfc_iocbq *abts_wqeq;
2844 union lpfc_wqe128 *wqe_abts;
2845 struct lpfc_nodelist *ndlp;
2847 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2848 "6067 ABTS: sid %x xri x%x/x%x\n",
2849 sid, xri, ctxp->wqeq->sli4_xritag);
2851 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2853 ndlp = lpfc_findnode_did(phba->pport, sid);
2854 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2855 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2856 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2857 atomic_inc(&tgtp->xmt_abort_rsp_error);
2858 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2859 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2860 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2862 /* No failure to an ABTS request. */
2863 return 0;
2864 }
2866 abts_wqeq = ctxp->wqeq;
2867 wqe_abts = &abts_wqeq->wqe;
2869 /*
2870 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2871 * that were initialized in lpfc_sli4_nvmet_alloc.
2872 */
2873 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2875 /* Word 5 */
2876 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2877 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2878 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2879 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2880 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2882 /* Word 6 */
2883 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2884 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2885 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2886 abts_wqeq->sli4_xritag);
2888 /* Word 7 */
2889 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2890 CMD_XMIT_SEQUENCE64_WQE);
2891 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2892 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2893 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2895 /* Word 8 */
2896 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2898 /* Word 9 */
2899 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2900 /* Needs to be set by caller */
2901 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2903 /* Word 10 */
2904 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2905 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2906 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2907 LPFC_WQE_LENLOC_WORD12);
2908 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2909 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2911 /* Word 11 */
2912 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2913 LPFC_WQE_CQ_ID_DEFAULT);
2914 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2915 OTHER_COMMAND);
2917 abts_wqeq->vport = phba->pport;
2918 abts_wqeq->context1 = ndlp;
2919 abts_wqeq->context2 = ctxp;
2920 abts_wqeq->context3 = NULL;
2921 abts_wqeq->rsvd2 = 0;
2922 /* hba_wqidx should already be setup from command we are aborting */
2923 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2924 abts_wqeq->iocb.ulpLe = 1;
2926 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2927 "6069 Issue ABTS to xri x%x reqtag x%x\n",
2928 xri, abts_wqeq->iotag);
2929 return 1;
2930 }
2932 static int
2933 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2934 struct lpfc_nvmet_rcv_ctx *ctxp,
2935 uint32_t sid, uint16_t xri)
2936 {
2937 struct lpfc_nvmet_tgtport *tgtp;
2938 struct lpfc_iocbq *abts_wqeq;
2939 union lpfc_wqe128 *abts_wqe;
2940 struct lpfc_nodelist *ndlp;
2941 unsigned long flags;
2942 int rc;
2944 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2945 if (!ctxp->wqeq) {
2946 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2947 ctxp->wqeq->hba_wqidx = 0;
2948 }
2950 ndlp = lpfc_findnode_did(phba->pport, sid);
2951 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2952 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2953 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2954 atomic_inc(&tgtp->xmt_abort_rsp_error);
2955 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2956 "6160 Drop ABORT - wrong NDLP state x%x.\n",
2957 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2959 /* No failure to an ABTS request. */
2960 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2961 return 0;
2962 }
2964 /* Issue ABTS for this WQE based on iotag */
2965 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2966 if (!ctxp->abort_wqeq) {
2967 atomic_inc(&tgtp->xmt_abort_rsp_error);
2968 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2969 "6161 ABORT failed: No wqeqs: "
2970 "xri: x%x\n", ctxp->oxid);
2971 /* No failure to an ABTS request. */
2972 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2973 return 0;
2974 }
2975 abts_wqeq = ctxp->abort_wqeq;
2976 abts_wqe = &abts_wqeq->wqe;
2977 ctxp->state = LPFC_NVMET_STE_ABORT;
2979 /* Announce entry to new IO submit field. */
2980 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2981 "6162 ABORT Request to rport DID x%06x "
2982 "for xri x%x x%x\n",
2983 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2985 /* If the hba is getting reset, this flag is set. It is
2986 * cleared when the reset is complete and rings reestablished.
2987 */
2988 spin_lock_irqsave(&phba->hbalock, flags);
2989 /* driver queued commands are in process of being flushed */
2990 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2991 spin_unlock_irqrestore(&phba->hbalock, flags);
2992 atomic_inc(&tgtp->xmt_abort_rsp_error);
2993 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2994 "6163 Driver in reset cleanup - flushing "
2995 "NVME Req now. hba_flag x%x oxid x%x\n",
2996 phba->hba_flag, ctxp->oxid);
2997 lpfc_sli_release_iocbq(phba, abts_wqeq);
2998 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2999 return 0;
3000 }
3002 /* Outstanding abort is in progress */
3003 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3004 spin_unlock_irqrestore(&phba->hbalock, flags);
3005 atomic_inc(&tgtp->xmt_abort_rsp_error);
3006 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3007 "6164 Outstanding NVME I/O Abort Request "
3008 "still pending on oxid x%x\n",
3009 ctxp->oxid);
3010 lpfc_sli_release_iocbq(phba, abts_wqeq);
3011 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3012 return 0;
3013 }
3015 /* Ready - mark outstanding as aborted by driver. */
3016 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3018 /* WQEs are reused. Clear stale data and set key fields to
3019 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3020 */
3021 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3023 /* word 3 */
3024 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3026 /* word 7 */
3027 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3028 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3030 /* word 8 - tell the FW to abort the IO associated with this
3031 * outstanding exchange ID.
3032 */
3033 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3035 /* word 9 - this is the iotag for the abts_wqe completion. */
3036 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3037 abts_wqeq->iotag);
3039 /* word 10 */
3040 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3041 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3043 /* word 11 */
3044 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3045 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3046 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3048 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
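/* (Presumably this keeps the abort ordered on the same work queue as
 * the command it targets.)
 */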
3049 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3050 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3051 abts_wqeq->iocb_cmpl = NULL;
3052 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3053 abts_wqeq->context2 = ctxp;
3054 abts_wqeq->vport = phba->pport;
3055 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3056 spin_unlock_irqrestore(&phba->hbalock, flags);
3057 if (rc == WQE_SUCCESS) {
3058 atomic_inc(&tgtp->xmt_abort_sol);
3059 return 0;
3060 }
3062 atomic_inc(&tgtp->xmt_abort_rsp_error);
3063 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3064 lpfc_sli_release_iocbq(phba, abts_wqeq);
3065 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3066 "6166 Failed ABORT issue_wqe with status x%x "
3067 "for oxid x%x.\n",
3068 rc, ctxp->oxid);
3069 return 1;
3070 }
3073 static int
3074 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3075 struct lpfc_nvmet_rcv_ctx *ctxp,
3076 uint32_t sid, uint16_t xri)
3077 {
3078 struct lpfc_nvmet_tgtport *tgtp;
3079 struct lpfc_iocbq *abts_wqeq;
3080 unsigned long flags;
3081 int rc;
3083 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3084 if (!ctxp->wqeq) {
3085 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3086 ctxp->wqeq->hba_wqidx = 0;
3087 }
3089 if (ctxp->state == LPFC_NVMET_STE_FREE) {
3090 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3091 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3092 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3093 rc = WQE_BUSY;
3094 goto aerr;
3095 }
3096 ctxp->state = LPFC_NVMET_STE_ABORT;
3097 ctxp->entry_cnt++;
3098 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3099 if (rc == 0)
3100 goto aerr;
3102 spin_lock_irqsave(&phba->hbalock, flags);
3103 abts_wqeq = ctxp->wqeq;
3104 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3105 abts_wqeq->iocb_cmpl = NULL;
3106 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3107 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3108 spin_unlock_irqrestore(&phba->hbalock, flags);
3109 if (rc == WQE_SUCCESS) {
3110 return 0;
3111 }
3113 aerr:
3114 spin_lock_irqsave(&ctxp->ctxlock, flags);
3115 if (ctxp->flag & LPFC_NVMET_CTX_RLS)
3116 list_del(&ctxp->list);
3117 ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3118 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3120 atomic_inc(&tgtp->xmt_abort_rsp_error);
3121 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3122 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3123 ctxp->oxid, rc);
3124 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3125 return 1;
3126 }
3128 static int
3129 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3130 struct lpfc_nvmet_rcv_ctx *ctxp,
3131 uint32_t sid, uint16_t xri)
3132 {
3133 struct lpfc_nvmet_tgtport *tgtp;
3134 struct lpfc_iocbq *abts_wqeq;
3135 union lpfc_wqe128 *wqe_abts;
3136 unsigned long flags;
3137 int rc;
3139 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3140 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3141 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3142 ctxp->entry_cnt++;
3143 } else {
3144 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3145 "6418 NVMET LS abort state mismatch "
3146 "IO x%x: %d %d\n",
3147 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3148 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3149 }
3151 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3152 if (!ctxp->wqeq) {
3153 /* Issue ABTS for this WQE based on iotag */
3154 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3155 if (!ctxp->wqeq) {
3156 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3157 "6068 Abort failed: No wqeqs: "
3158 "xri: x%x\n", xri);
3159 /* No failure to an ABTS request. */
3160 kfree(ctxp);
3161 return 0;
3162 }
3163 }
3164 abts_wqeq = ctxp->wqeq;
3165 wqe_abts = &abts_wqeq->wqe;
3167 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3168 rc = WQE_BUSY;
3169 goto out;
3170 }
3172 spin_lock_irqsave(&phba->hbalock, flags);
3173 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3174 abts_wqeq->iocb_cmpl = NULL;
3175 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3176 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
3177 spin_unlock_irqrestore(&phba->hbalock, flags);
3178 if (rc == WQE_SUCCESS) {
3179 atomic_inc(&tgtp->xmt_abort_unsol);
3180 return 0;
3181 }
3182 out:
3183 atomic_inc(&tgtp->xmt_abort_rsp_error);
3184 abts_wqeq->context2 = NULL;
3185 abts_wqeq->context3 = NULL;
3186 lpfc_sli_release_iocbq(phba, abts_wqeq);
3187 kfree(ctxp);
3188 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3189 "6056 Failed to Issue ABTS. Status x%x\n", rc);
3190 return 0;
3191 }