Linux 4.16.11
[linux/fpc-iii.git] / drivers / scsi / lpfc / lpfc_nvmet.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <../drivers/nvme/host/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
43 #include "lpfc_version.h"
44 #include "lpfc_hw4.h"
45 #include "lpfc_hw.h"
46 #include "lpfc_sli.h"
47 #include "lpfc_sli4.h"
48 #include "lpfc_nl.h"
49 #include "lpfc_disc.h"
50 #include "lpfc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 struct lpfc_nvmet_rcv_ctx *,
61 dma_addr_t rspbuf,
62 uint16_t rspsize);
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 struct lpfc_nvmet_rcv_ctx *,
67 uint32_t, uint16_t);
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 struct lpfc_nvmet_rcv_ctx *,
70 uint32_t, uint16_t);
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 struct lpfc_nvmet_rcv_ctx *,
73 uint32_t, uint16_t);
75 void
76 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
78 unsigned long iflag;
80 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
81 "6313 NVMET Defer ctx release xri x%x flg x%x\n",
82 ctxp->oxid, ctxp->flag);
84 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
85 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
86 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
87 iflag);
88 return;
90 ctxp->flag |= LPFC_NVMET_CTX_RLS;
91 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
92 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
95 /**
96 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
97 * @phba: Pointer to HBA context object.
98 * @cmdwqe: Pointer to driver command WQE object.
99 * @wcqe: Pointer to driver response CQE object.
101 * The function is called from SLI ring event handler with no
102 * lock held. This function is the completion handler for NVME LS commands.
103 * The function frees memory resources used for the NVME commands.
105 static void
106 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
107 struct lpfc_wcqe_complete *wcqe)
109 struct lpfc_nvmet_tgtport *tgtp;
110 struct nvmefc_tgt_ls_req *rsp;
111 struct lpfc_nvmet_rcv_ctx *ctxp;
112 uint32_t status, result;
114 status = bf_get(lpfc_wcqe_c_status, wcqe);
115 result = wcqe->parameter;
116 ctxp = cmdwqe->context2;
118 if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
119 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
120 "6410 NVMET LS cmpl state mismatch IO x%x: "
121 "%d %d\n",
122 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
125 if (!phba->targetport)
126 goto out;
128 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
130 if (tgtp) {
131 if (status) {
132 atomic_inc(&tgtp->xmt_ls_rsp_error);
133 if (status == IOERR_ABORT_REQUESTED)
134 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
135 if (bf_get(lpfc_wcqe_c_xb, wcqe))
136 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
137 } else {
138 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
142 out:
143 rsp = &ctxp->ctx.ls_req;
145 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
146 ctxp->oxid, status, result);
148 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
149 "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
150 status, result, ctxp->oxid);
152 lpfc_nlp_put(cmdwqe->context1);
153 cmdwqe->context2 = NULL;
154 cmdwqe->context3 = NULL;
155 lpfc_sli_release_iocbq(phba, cmdwqe);
156 rsp->done(rsp);
157 kfree(ctxp);
161 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
162 * @phba: HBA the context buffer is associated with
163 * @ctx_buf: context buffer to release
166 * Description: Reuses the context for an NVME command queued on the
167 * io_wait list, if any; otherwise returns it to a per-CPU free list.
169 * Notes: Takes the nvmet_io_wait and context-list locks internally.
171 * Returns: None
173 void
174 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
176 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
177 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
178 struct lpfc_nvmet_tgtport *tgtp;
179 struct fc_frame_header *fc_hdr;
180 struct rqb_dmabuf *nvmebuf;
181 struct lpfc_nvmet_ctx_info *infop;
182 uint32_t *payload;
183 uint32_t size, oxid, sid, rc;
184 int cpu;
185 unsigned long iflag;
187 if (ctxp->txrdy) {
188 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
189 ctxp->txrdy_phys);
190 ctxp->txrdy = NULL;
191 ctxp->txrdy_phys = 0;
194 if (ctxp->state == LPFC_NVMET_STE_FREE) {
195 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
196 "6411 NVMET free, already free IO x%x: %d %d\n",
197 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
199 ctxp->state = LPFC_NVMET_STE_FREE;
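	/*
	 * If another NVME command is already queued waiting for a free
	 * context, reuse this one for it right away; otherwise the context
	 * is returned to the per-CPU free list further below.
	 */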
201 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
202 if (phba->sli4_hba.nvmet_io_wait_cnt) {
203 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
204 nvmebuf, struct rqb_dmabuf,
205 hbuf.list);
206 phba->sli4_hba.nvmet_io_wait_cnt--;
207 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
208 iflag);
210 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
211 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
212 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
213 payload = (uint32_t *)(nvmebuf->dbuf.virt);
214 size = nvmebuf->bytes_recv;
215 sid = sli4_sid_from_fc_hdr(fc_hdr);
217 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
218 ctxp->wqeq = NULL;
219 ctxp->txrdy = NULL;
220 ctxp->offset = 0;
221 ctxp->phba = phba;
222 ctxp->size = size;
223 ctxp->oxid = oxid;
224 ctxp->sid = sid;
225 ctxp->state = LPFC_NVMET_STE_RCV;
226 ctxp->entry_cnt = 1;
227 ctxp->flag = 0;
228 ctxp->ctxbuf = ctx_buf;
229 ctxp->rqb_buffer = (void *)nvmebuf;
230 spin_lock_init(&ctxp->ctxlock);
232 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
233 if (ctxp->ts_cmd_nvme) {
234 ctxp->ts_cmd_nvme = ktime_get_ns();
235 ctxp->ts_nvme_data = 0;
236 ctxp->ts_data_wqput = 0;
237 ctxp->ts_isr_data = 0;
238 ctxp->ts_data_nvme = 0;
239 ctxp->ts_nvme_status = 0;
240 ctxp->ts_status_wqput = 0;
241 ctxp->ts_isr_status = 0;
242 ctxp->ts_status_nvme = 0;
244 #endif
245 atomic_inc(&tgtp->rcv_fcp_cmd_in);
247 * The calling sequence should be:
248 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
249 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
250 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
251 * in the NVME command / FC header is stored.
252 * A buffer has already been reposted for this IO, so just free
253 * the nvmebuf.
255 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
256 payload, size);
258 /* Process FCP command */
259 if (rc == 0) {
260 atomic_inc(&tgtp->rcv_fcp_cmd_out);
261 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
262 return;
265 /* Processing of FCP command is deferred */
266 if (rc == -EOVERFLOW) {
267 lpfc_nvmeio_data(phba,
268 "NVMET RCV BUSY: xri x%x sz %d "
269 "from %06x\n",
270 oxid, size, sid);
271 /* defer repost rcv buffer till .defer_rcv callback */
272 ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
273 atomic_inc(&tgtp->rcv_fcp_cmd_out);
274 return;
276 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
277 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
278 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
279 ctxp->oxid, rc,
280 atomic_read(&tgtp->rcv_fcp_cmd_in),
281 atomic_read(&tgtp->rcv_fcp_cmd_out),
282 atomic_read(&tgtp->xmt_fcp_release));
284 lpfc_nvmet_defer_release(phba, ctxp);
285 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
286 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
287 return;
289 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
292 * Use the CPU context list, from the MRQ the IO was received on
293 * (ctxp->idx), to save context structure.
295 cpu = smp_processor_id();
296 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
297 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
298 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
299 infop->nvmet_ctx_list_cnt++;
300 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
301 #endif
304 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
305 static void
306 lpfc_nvmet_ktime(struct lpfc_hba *phba,
307 struct lpfc_nvmet_rcv_ctx *ctxp)
309 uint64_t seg1, seg2, seg3, seg4, seg5;
310 uint64_t seg6, seg7, seg8, seg9, seg10;
311 uint64_t segsum;
313 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
314 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
315 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
316 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
317 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
318 return;
320 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
321 return;
322 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
323 return;
324 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
325 return;
326 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
327 return;
328 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
329 return;
330 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
331 return;
332 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
333 return;
334 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
335 return;
336 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
337 return;
338 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
339 return;
341 * Segment 1 - Time from FCP command received by MSI-X ISR
342 * to FCP command is passed to NVME Layer.
343 * Segment 2 - Time from FCP command payload handed
344 * off to NVME Layer to Driver receives a Command op
345 * from NVME Layer.
346 * Segment 3 - Time from Driver receives a Command op
347 * from NVME Layer to Command is put on WQ.
348 * Segment 4 - Time from Driver WQ put is done
349 * to MSI-X ISR for Command cmpl.
350 * Segment 5 - Time from MSI-X ISR for Command cmpl to
351 * Command cmpl is passed to NVME Layer.
352 * Segment 6 - Time from Command cmpl is passed to NVME
353 * Layer to Driver receives a RSP op from NVME Layer.
354 * Segment 7 - Time from Driver receives a RSP op from
355 * NVME Layer to WQ put is done on TRSP FCP Status.
356 * Segment 8 - Time from Driver WQ put is done on TRSP
357 * FCP Status to MSI-X ISR for TRSP cmpl.
358 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
359 * TRSP cmpl is passed to NVME Layer.
360 * Segment 10 - Time from FCP command received by
361 * MSI-X ISR to command is completed on wire.
362 * (Segments 1 thru 8) for READDATA / WRITEDATA
363 * (Segments 1 thru 4) for READDATA_RSP
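	/*
	 * The timestamps are absolute ktime values; each segment below is
	 * computed as the cumulative delta from ts_isr_cmd minus the running
	 * sum (segsum) of the earlier segments.
	 */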
365 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
366 segsum = seg1;
368 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
369 if (segsum > seg2)
370 return;
371 seg2 -= segsum;
372 segsum += seg2;
374 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
375 if (segsum > seg3)
376 return;
377 seg3 -= segsum;
378 segsum += seg3;
380 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
381 if (segsum > seg4)
382 return;
383 seg4 -= segsum;
384 segsum += seg4;
386 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
387 if (segsum > seg5)
388 return;
389 seg5 -= segsum;
390 segsum += seg5;
393 /* For auto rsp commands seg6 thru seg10 will be 0 */
394 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
395 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
396 if (segsum > seg6)
397 return;
398 seg6 -= segsum;
399 segsum += seg6;
401 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
402 if (segsum > seg7)
403 return;
404 seg7 -= segsum;
405 segsum += seg7;
407 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
408 if (segsum > seg8)
409 return;
410 seg8 -= segsum;
411 segsum += seg8;
413 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
414 if (segsum > seg9)
415 return;
416 seg9 -= segsum;
417 segsum += seg9;
419 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
420 return;
421 seg10 = (ctxp->ts_isr_status -
422 ctxp->ts_isr_cmd);
423 } else {
424 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
425 return;
426 seg6 = 0;
427 seg7 = 0;
428 seg8 = 0;
429 seg9 = 0;
430 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
433 phba->ktime_seg1_total += seg1;
434 if (seg1 < phba->ktime_seg1_min)
435 phba->ktime_seg1_min = seg1;
436 else if (seg1 > phba->ktime_seg1_max)
437 phba->ktime_seg1_max = seg1;
439 phba->ktime_seg2_total += seg2;
440 if (seg2 < phba->ktime_seg2_min)
441 phba->ktime_seg2_min = seg2;
442 else if (seg2 > phba->ktime_seg2_max)
443 phba->ktime_seg2_max = seg2;
445 phba->ktime_seg3_total += seg3;
446 if (seg3 < phba->ktime_seg3_min)
447 phba->ktime_seg3_min = seg3;
448 else if (seg3 > phba->ktime_seg3_max)
449 phba->ktime_seg3_max = seg3;
451 phba->ktime_seg4_total += seg4;
452 if (seg4 < phba->ktime_seg4_min)
453 phba->ktime_seg4_min = seg4;
454 else if (seg4 > phba->ktime_seg4_max)
455 phba->ktime_seg4_max = seg4;
457 phba->ktime_seg5_total += seg5;
458 if (seg5 < phba->ktime_seg5_min)
459 phba->ktime_seg5_min = seg5;
460 else if (seg5 > phba->ktime_seg5_max)
461 phba->ktime_seg5_max = seg5;
463 phba->ktime_data_samples++;
464 if (!seg6)
465 goto out;
467 phba->ktime_seg6_total += seg6;
468 if (seg6 < phba->ktime_seg6_min)
469 phba->ktime_seg6_min = seg6;
470 else if (seg6 > phba->ktime_seg6_max)
471 phba->ktime_seg6_max = seg6;
473 phba->ktime_seg7_total += seg7;
474 if (seg7 < phba->ktime_seg7_min)
475 phba->ktime_seg7_min = seg7;
476 else if (seg7 > phba->ktime_seg7_max)
477 phba->ktime_seg7_max = seg7;
479 phba->ktime_seg8_total += seg8;
480 if (seg8 < phba->ktime_seg8_min)
481 phba->ktime_seg8_min = seg8;
482 else if (seg8 > phba->ktime_seg8_max)
483 phba->ktime_seg8_max = seg8;
485 phba->ktime_seg9_total += seg9;
486 if (seg9 < phba->ktime_seg9_min)
487 phba->ktime_seg9_min = seg9;
488 else if (seg9 > phba->ktime_seg9_max)
489 phba->ktime_seg9_max = seg9;
490 out:
491 phba->ktime_seg10_total += seg10;
492 if (seg10 < phba->ktime_seg10_min)
493 phba->ktime_seg10_min = seg10;
494 else if (seg10 > phba->ktime_seg10_max)
495 phba->ktime_seg10_max = seg10;
496 phba->ktime_status_samples++;
498 #endif
501 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
502 * @phba: Pointer to HBA context object.
503 * @cmdwqe: Pointer to driver command WQE object.
504 * @wcqe: Pointer to driver response CQE object.
506 * The function is called from SLI ring event handler with no
507 * lock held. This function is the completion handler for NVME FCP commands.
508 * The function frees memory resources used for the NVME commands.
510 static void
511 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
512 struct lpfc_wcqe_complete *wcqe)
514 struct lpfc_nvmet_tgtport *tgtp;
515 struct nvmefc_tgt_fcp_req *rsp;
516 struct lpfc_nvmet_rcv_ctx *ctxp;
517 uint32_t status, result, op, start_clean, logerr;
518 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
519 uint32_t id;
520 #endif
522 ctxp = cmdwqe->context2;
523 ctxp->flag &= ~LPFC_NVMET_IO_INP;
525 rsp = &ctxp->ctx.fcp_req;
526 op = rsp->op;
528 status = bf_get(lpfc_wcqe_c_status, wcqe);
529 result = wcqe->parameter;
531 if (phba->targetport)
532 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
533 else
534 tgtp = NULL;
536 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
537 ctxp->oxid, op, status);
539 if (status) {
540 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
541 rsp->transferred_length = 0;
542 if (tgtp) {
543 atomic_inc(&tgtp->xmt_fcp_rsp_error);
544 if (status == IOERR_ABORT_REQUESTED)
545 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
548 logerr = LOG_NVME_IOERR;
550 /* pick up SLI4 exchange busy condition */
551 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
552 ctxp->flag |= LPFC_NVMET_XBUSY;
553 logerr |= LOG_NVME_ABTS;
554 if (tgtp)
555 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
557 } else {
558 ctxp->flag &= ~LPFC_NVMET_XBUSY;
561 lpfc_printf_log(phba, KERN_INFO, logerr,
562 "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
563 ctxp->oxid, status, result, ctxp->flag);
565 } else {
566 rsp->fcp_error = NVME_SC_SUCCESS;
567 if (op == NVMET_FCOP_RSP)
568 rsp->transferred_length = rsp->rsplen;
569 else
570 rsp->transferred_length = rsp->transfer_length;
571 if (tgtp)
572 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
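	/*
	 * READDATA_RSP and RSP ops carry the final FCP status for the
	 * exchange, so the IO is marked done below; for plain data ops the
	 * WQE is cleaned for reuse and the op is handed back to the
	 * transport for the next phase.
	 */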
575 if ((op == NVMET_FCOP_READDATA_RSP) ||
576 (op == NVMET_FCOP_RSP)) {
577 /* Sanity check */
578 ctxp->state = LPFC_NVMET_STE_DONE;
579 ctxp->entry_cnt++;
581 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
582 if (ctxp->ts_cmd_nvme) {
583 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
584 ctxp->ts_isr_data =
585 cmdwqe->isr_timestamp;
586 ctxp->ts_data_nvme =
587 ktime_get_ns();
588 ctxp->ts_nvme_status =
589 ctxp->ts_data_nvme;
590 ctxp->ts_status_wqput =
591 ctxp->ts_data_nvme;
592 ctxp->ts_isr_status =
593 ctxp->ts_data_nvme;
594 ctxp->ts_status_nvme =
595 ctxp->ts_data_nvme;
596 } else {
597 ctxp->ts_isr_status =
598 cmdwqe->isr_timestamp;
599 ctxp->ts_status_nvme =
600 ktime_get_ns();
603 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
604 id = smp_processor_id();
605 if (ctxp->cpu != id)
606 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
607 "6703 CPU Check cmpl: "
608 "cpu %d expect %d\n",
609 id, ctxp->cpu);
610 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
611 phba->cpucheck_cmpl_io[id]++;
613 #endif
614 rsp->done(rsp);
615 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
616 if (ctxp->ts_cmd_nvme)
617 lpfc_nvmet_ktime(phba, ctxp);
618 #endif
619 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
620 } else {
621 ctxp->entry_cnt++;
622 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
623 memset(((char *)cmdwqe) + start_clean, 0,
624 (sizeof(struct lpfc_iocbq) - start_clean));
625 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
626 if (ctxp->ts_cmd_nvme) {
627 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
628 ctxp->ts_data_nvme = ktime_get_ns();
630 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
631 id = smp_processor_id();
632 if (ctxp->cpu != id)
633 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
634 "6704 CPU Check cmdcmpl: "
635 "cpu %d expect %d\n",
636 id, ctxp->cpu);
637 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
638 phba->cpucheck_ccmpl_io[id]++;
640 #endif
641 rsp->done(rsp);
645 static int
646 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
647 struct nvmefc_tgt_ls_req *rsp)
649 struct lpfc_nvmet_rcv_ctx *ctxp =
650 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
651 struct lpfc_hba *phba = ctxp->phba;
652 struct hbq_dmabuf *nvmebuf =
653 (struct hbq_dmabuf *)ctxp->rqb_buffer;
654 struct lpfc_iocbq *nvmewqeq;
655 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
656 struct lpfc_dmabuf dmabuf;
657 struct ulp_bde64 bpl;
658 int rc;
660 if (phba->pport->load_flag & FC_UNLOADING)
661 return -ENODEV;
663 if (phba->pport->load_flag & FC_UNLOADING)
664 return -ENODEV;
666 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
667 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
669 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
670 (ctxp->entry_cnt != 1)) {
671 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
672 "6412 NVMET LS rsp state mismatch "
673 "oxid x%x: %d %d\n",
674 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
676 ctxp->state = LPFC_NVMET_STE_LS_RSP;
677 ctxp->entry_cnt++;
679 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
680 rsp->rsplen);
681 if (nvmewqeq == NULL) {
682 atomic_inc(&nvmep->xmt_ls_drop);
683 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
684 "6150 LS Drop IO x%x: Prep\n",
685 ctxp->oxid);
686 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
687 atomic_inc(&nvmep->xmt_ls_abort);
688 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
689 ctxp->sid, ctxp->oxid);
690 return -ENOMEM;
693 /* Save numBdes for bpl2sgl */
694 nvmewqeq->rsvd2 = 1;
695 nvmewqeq->hba_wqidx = 0;
696 nvmewqeq->context3 = &dmabuf;
697 dmabuf.virt = &bpl;
698 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
699 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
700 bpl.tus.f.bdeSize = rsp->rsplen;
701 bpl.tus.f.bdeFlags = 0;
702 bpl.tus.w = le32_to_cpu(bpl.tus.w);
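	/*
	 * Note: dmabuf and bpl are stack locals; they are only referenced
	 * while the BPL is converted to SGL entries as the WQE is issued
	 * below, so they do not need to outlive this routine.
	 */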
704 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
705 nvmewqeq->iocb_cmpl = NULL;
706 nvmewqeq->context2 = ctxp;
708 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
709 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
711 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
712 if (rc == WQE_SUCCESS) {
714 * Okay to repost buffer here, but wait till cmpl
715 * before freeing ctxp and iocbq.
717 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
718 ctxp->rqb_buffer = 0;
719 atomic_inc(&nvmep->xmt_ls_rsp);
720 return 0;
722 /* Give back resources */
723 atomic_inc(&nvmep->xmt_ls_drop);
724 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
725 "6151 LS Drop IO x%x: Issue %d\n",
726 ctxp->oxid, rc);
728 lpfc_nlp_put(nvmewqeq->context1);
730 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
731 atomic_inc(&nvmep->xmt_ls_abort);
732 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
733 return -ENXIO;
736 static int
737 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
738 struct nvmefc_tgt_fcp_req *rsp)
740 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
741 struct lpfc_nvmet_rcv_ctx *ctxp =
742 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
743 struct lpfc_hba *phba = ctxp->phba;
744 struct lpfc_iocbq *nvmewqeq;
745 int rc;
747 if (phba->pport->load_flag & FC_UNLOADING) {
748 rc = -ENODEV;
749 goto aerr;
752 if (phba->pport->load_flag & FC_UNLOADING) {
753 rc = -ENODEV;
754 goto aerr;
757 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
758 if (ctxp->ts_cmd_nvme) {
759 if (rsp->op == NVMET_FCOP_RSP)
760 ctxp->ts_nvme_status = ktime_get_ns();
761 else
762 ctxp->ts_nvme_data = ktime_get_ns();
764 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
765 int id = smp_processor_id();
766 ctxp->cpu = id;
767 if (id < LPFC_CHECK_CPU_CNT)
768 phba->cpucheck_xmt_io[id]++;
769 if (rsp->hwqid != id) {
770 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
771 "6705 CPU Check OP: "
772 "cpu %d expect %d\n",
773 id, rsp->hwqid);
774 ctxp->cpu = rsp->hwqid;
777 #endif
779 /* Sanity check */
780 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
781 (ctxp->state == LPFC_NVMET_STE_ABORT)) {
782 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
783 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
784 "6102 IO xri x%x aborted\n",
785 ctxp->oxid);
786 rc = -ENXIO;
787 goto aerr;
790 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
791 if (nvmewqeq == NULL) {
792 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
793 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
794 "6152 FCP Drop IO x%x: Prep\n",
795 ctxp->oxid);
796 rc = -ENXIO;
797 goto aerr;
800 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
801 nvmewqeq->iocb_cmpl = NULL;
802 nvmewqeq->context2 = ctxp;
803 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
804 ctxp->wqeq->hba_wqidx = rsp->hwqid;
806 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
807 ctxp->oxid, rsp->op, rsp->rsplen);
809 ctxp->flag |= LPFC_NVMET_IO_INP;
810 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
811 if (rc == WQE_SUCCESS) {
812 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
813 if (!ctxp->ts_cmd_nvme)
814 return 0;
815 if (rsp->op == NVMET_FCOP_RSP)
816 ctxp->ts_status_wqput = ktime_get_ns();
817 else
818 ctxp->ts_data_wqput = ktime_get_ns();
819 #endif
820 return 0;
823 /* Give back resources */
824 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
825 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
826 "6153 FCP Drop IO x%x: Issue: %d\n",
827 ctxp->oxid, rc);
829 ctxp->wqeq->hba_wqidx = 0;
830 nvmewqeq->context2 = NULL;
831 nvmewqeq->context3 = NULL;
832 rc = -EBUSY;
833 aerr:
834 return rc;
837 static void
838 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
840 struct lpfc_nvmet_tgtport *tport = targetport->private;
842 /* release any threads waiting for the unreg to complete */
843 complete(&tport->tport_unreg_done);
846 static void
847 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
848 struct nvmefc_tgt_fcp_req *req)
850 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
851 struct lpfc_nvmet_rcv_ctx *ctxp =
852 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
853 struct lpfc_hba *phba = ctxp->phba;
854 unsigned long flags;
856 if (phba->pport->load_flag & FC_UNLOADING)
857 return;
859 if (phba->pport->load_flag & FC_UNLOADING)
860 return;
862 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
863 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
864 ctxp->oxid, ctxp->flag, ctxp->state);
866 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
867 ctxp->oxid, ctxp->flag, ctxp->state);
869 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
871 spin_lock_irqsave(&ctxp->ctxlock, flags);
872 ctxp->state = LPFC_NVMET_STE_ABORT;
874 /* Since iaab/iaar are NOT set, we need to check
875 * if the firmware is in process of aborting IO
877 if (ctxp->flag & LPFC_NVMET_XBUSY) {
878 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
879 return;
881 ctxp->flag |= LPFC_NVMET_ABORT_OP;
883 /* A state of LPFC_NVMET_STE_RCV means we have just received
884 * the NVME command and have not started processing it.
885 * (by issuing any IO WQEs on this exchange yet)
887 if (ctxp->state == LPFC_NVMET_STE_RCV)
888 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
889 ctxp->oxid);
890 else
891 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
892 ctxp->oxid);
893 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
896 static void
897 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
898 struct nvmefc_tgt_fcp_req *rsp)
900 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
901 struct lpfc_nvmet_rcv_ctx *ctxp =
902 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
903 struct lpfc_hba *phba = ctxp->phba;
904 unsigned long flags;
905 bool aborting = false;
907 if (ctxp->state != LPFC_NVMET_STE_DONE &&
908 ctxp->state != LPFC_NVMET_STE_ABORT) {
909 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
910 "6413 NVMET release bad state %d %d oxid x%x\n",
911 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
914 spin_lock_irqsave(&ctxp->ctxlock, flags);
915 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
916 (ctxp->flag & LPFC_NVMET_XBUSY)) {
917 aborting = true;
918 /* let the abort path do the real release */
919 lpfc_nvmet_defer_release(phba, ctxp);
921 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
923 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
924 ctxp->state, aborting);
926 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
928 if (aborting)
929 return;
931 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
934 static void
935 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
936 struct nvmefc_tgt_fcp_req *rsp)
938 struct lpfc_nvmet_tgtport *tgtp;
939 struct lpfc_nvmet_rcv_ctx *ctxp =
940 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
941 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
942 struct lpfc_hba *phba = ctxp->phba;
944 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
945 ctxp->oxid, ctxp->size, smp_processor_id());
947 tgtp = phba->targetport->private;
948 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
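	/*
	 * LPFC_NVMET_DEFER_RCV_REPOST is set when the deferred command came
	 * straight off an MRQ and its buffer still needs reposting; it is
	 * clear when the command came from the io_wait list, where a
	 * replacement buffer was already posted, so the nvmebuf is freed.
	 */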
949 if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
950 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
951 else
952 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
953 ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
956 static struct nvmet_fc_target_template lpfc_tgttemplate = {
957 .targetport_delete = lpfc_nvmet_targetport_delete,
958 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
959 .fcp_op = lpfc_nvmet_xmt_fcp_op,
960 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
961 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
962 .defer_rcv = lpfc_nvmet_defer_rcv,
964 .max_hw_queues = 1,
965 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
966 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
967 .dma_boundary = 0xFFFFFFFF,
969 /* optional features */
970 .target_features = 0,
971 /* sizes of additional private data for data structures */
972 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
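/*
 * Note: max_hw_queues, max_sgl_segments and target_features above are only
 * defaults; lpfc_nvmet_create_targetport() overwrites them before the
 * template is registered with the nvmet-fc transport.
 */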
975 static void
976 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
977 struct lpfc_nvmet_ctx_info *infop)
979 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
980 unsigned long flags;
982 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
983 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
984 &infop->nvmet_ctx_list, list) {
985 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
986 list_del_init(&ctx_buf->list);
987 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
989 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
990 ctx_buf->sglq->state = SGL_FREED;
991 ctx_buf->sglq->ndlp = NULL;
993 spin_lock(&phba->sli4_hba.sgl_list_lock);
994 list_add_tail(&ctx_buf->sglq->list,
995 &phba->sli4_hba.lpfc_nvmet_sgl_list);
996 spin_unlock(&phba->sli4_hba.sgl_list_lock);
998 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
999 kfree(ctx_buf->context);
1001 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1004 static void
1005 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1007 struct lpfc_nvmet_ctx_info *infop;
1008 int i, j;
1010 /* The first context list, MRQ 0 CPU 0 */
1011 infop = phba->sli4_hba.nvmet_ctx_info;
1012 if (!infop)
1013 return;
1015 /* Cycle through the entire CPU context list for every MRQ */
1016 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1017 for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1018 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1019 infop++; /* next */
1022 kfree(phba->sli4_hba.nvmet_ctx_info);
1023 phba->sli4_hba.nvmet_ctx_info = NULL;
1026 static int
1027 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1029 struct lpfc_nvmet_ctxbuf *ctx_buf;
1030 struct lpfc_iocbq *nvmewqe;
1031 union lpfc_wqe128 *wqe;
1032 struct lpfc_nvmet_ctx_info *last_infop;
1033 struct lpfc_nvmet_ctx_info *infop;
1034 int i, j, idx;
1036 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1037 "6403 Allocate NVMET resources for %d XRIs\n",
1038 phba->sli4_hba.nvmet_xri_cnt);
1040 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1041 phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1042 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1043 if (!phba->sli4_hba.nvmet_ctx_info) {
1044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1045 "6419 Failed allocate memory for "
1046 "nvmet context lists\n");
1047 return -ENOMEM;
1051 * Assuming X CPUs in the system, and Y MRQs, allocate some
1052 * lpfc_nvmet_ctx_info structures as follows:
1054 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1055 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1056 * ...
1057 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1059 * Each line represents a MRQ "silo" containing an entry for
1060 * every CPU.
1062 * MRQ X is initially assumed to be associated with CPU X, thus
1063 * contexts are initially distributed across all MRQs using
1064 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1065 * freed, they are freed to the MRQ silo based on the CPU number
1066 * of the IO completion. Thus a context that was allocated for MRQ A
1067 * whose IO completed on CPU B will be freed to cpuB/mrqA.
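 * The per-CPU/per-MRQ info structs are laid out row-major by CPU (see the
 * allocation loop below), so lpfc_get_ctx_list(phba, cpu, mrq) effectively
 * resolves to &nvmet_ctx_info[cpu * phba->cfg_nvmet_mrq + mrq].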
1069 infop = phba->sli4_hba.nvmet_ctx_info;
1070 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1071 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1072 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1073 spin_lock_init(&infop->nvmet_ctx_list_lock);
1074 infop->nvmet_ctx_list_cnt = 0;
1075 infop++;
1080 * Setup the next CPU context info ptr for each MRQ.
1081 * MRQ 0 will cycle thru CPUs 0 - X separately from
1082 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1084 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1085 last_infop = lpfc_get_ctx_list(phba, 0, j);
1086 for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
1087 infop = lpfc_get_ctx_list(phba, i, j);
1088 infop->nvmet_ctx_next_cpu = last_infop;
1089 last_infop = infop;
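	/*
	 * The loop above links each MRQ's per-CPU entries into a circular
	 * ring via nvmet_ctx_next_cpu; lpfc_nvmet_replenish_context() walks
	 * that ring when a CPU's context list for the MRQ runs empty.
	 */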
1093 /* For all nvmet xris, allocate resources needed to process a
1094 * received command on a per xri basis.
1096 idx = 0;
1097 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1098 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1099 if (!ctx_buf) {
1100 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1101 "6404 Ran out of memory for NVMET\n");
1102 return -ENOMEM;
1105 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1106 GFP_KERNEL);
1107 if (!ctx_buf->context) {
1108 kfree(ctx_buf);
1109 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1110 "6405 Ran out of NVMET "
1111 "context memory\n");
1112 return -ENOMEM;
1114 ctx_buf->context->ctxbuf = ctx_buf;
1115 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1117 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1118 if (!ctx_buf->iocbq) {
1119 kfree(ctx_buf->context);
1120 kfree(ctx_buf);
1121 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1122 "6406 Ran out of NVMET iocb/WQEs\n");
1123 return -ENOMEM;
1125 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1126 nvmewqe = ctx_buf->iocbq;
1127 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1128 /* Initialize WQE */
1129 memset(wqe, 0, sizeof(union lpfc_wqe));
1130 /* Word 7 */
1131 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
1132 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
1133 /* Word 10 */
1134 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1135 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
1136 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
1138 ctx_buf->iocbq->context1 = NULL;
1139 spin_lock(&phba->sli4_hba.sgl_list_lock);
1140 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1141 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1142 if (!ctx_buf->sglq) {
1143 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1144 kfree(ctx_buf->context);
1145 kfree(ctx_buf);
1146 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1147 "6407 Ran out of NVMET XRIs\n");
1148 return -ENOMEM;
1152 * Add ctx to MRQidx context list. Our initial assumption
1153 * is MRQidx will be associated with CPUidx. This association
1154 * can change on the fly.
1156 infop = lpfc_get_ctx_list(phba, idx, idx);
1157 spin_lock(&infop->nvmet_ctx_list_lock);
1158 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1159 infop->nvmet_ctx_list_cnt++;
1160 spin_unlock(&infop->nvmet_ctx_list_lock);
1162 /* Spread ctx structures evenly across all MRQs */
1163 idx++;
1164 if (idx >= phba->cfg_nvmet_mrq)
1165 idx = 0;
1168 infop = phba->sli4_hba.nvmet_ctx_info;
1169 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1170 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1171 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1172 "6408 TOTAL NVMET ctx for CPU %d "
1173 "MRQ %d: cnt %d nextcpu %p\n",
1174 i, j, infop->nvmet_ctx_list_cnt,
1175 infop->nvmet_ctx_next_cpu);
1176 infop++;
1179 return 0;
1183 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1185 struct lpfc_vport *vport = phba->pport;
1186 struct lpfc_nvmet_tgtport *tgtp;
1187 struct nvmet_fc_port_info pinfo;
1188 int error;
1190 if (phba->targetport)
1191 return 0;
1193 error = lpfc_nvmet_setup_io_context(phba);
1194 if (error)
1195 return error;
1197 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1198 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1199 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1200 pinfo.port_id = vport->fc_myDID;
1202 /* Limit to LPFC_MAX_NVME_SEG_CNT.
1203 * For now need + 1 to get around NVME transport logic.
1205 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
1206 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1207 "6400 Reducing sg segment cnt to %d\n",
1208 LPFC_MAX_NVME_SEG_CNT);
1209 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
1210 } else {
1211 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
1213 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1214 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1215 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1217 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1218 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1219 &phba->pcidev->dev,
1220 &phba->targetport);
1221 #else
1222 error = -ENOENT;
1223 #endif
1224 if (error) {
1225 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1226 "6025 Cannot register NVME targetport x%x: "
1227 "portnm %llx nodenm %llx segs %d qs %d\n",
1228 error,
1229 pinfo.port_name, pinfo.node_name,
1230 lpfc_tgttemplate.max_sgl_segments,
1231 lpfc_tgttemplate.max_hw_queues);
1232 phba->targetport = NULL;
1233 phba->nvmet_support = 0;
1235 lpfc_nvmet_cleanup_io_context(phba);
1237 } else {
1238 tgtp = (struct lpfc_nvmet_tgtport *)
1239 phba->targetport->private;
1240 tgtp->phba = phba;
1242 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1243 "6026 Registered NVME "
1244 "targetport: %p, private %p "
1245 "portnm %llx nodenm %llx segs %d qs %d\n",
1246 phba->targetport, tgtp,
1247 pinfo.port_name, pinfo.node_name,
1248 lpfc_tgttemplate.max_sgl_segments,
1249 lpfc_tgttemplate.max_hw_queues);
1251 atomic_set(&tgtp->rcv_ls_req_in, 0);
1252 atomic_set(&tgtp->rcv_ls_req_out, 0);
1253 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1254 atomic_set(&tgtp->xmt_ls_abort, 0);
1255 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1256 atomic_set(&tgtp->xmt_ls_rsp, 0);
1257 atomic_set(&tgtp->xmt_ls_drop, 0);
1258 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1259 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1260 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1261 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1262 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1263 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1264 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1265 atomic_set(&tgtp->xmt_fcp_drop, 0);
1266 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1267 atomic_set(&tgtp->xmt_fcp_read, 0);
1268 atomic_set(&tgtp->xmt_fcp_write, 0);
1269 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1270 atomic_set(&tgtp->xmt_fcp_release, 0);
1271 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1272 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1273 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1274 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1275 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1276 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1277 atomic_set(&tgtp->xmt_fcp_abort, 0);
1278 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1279 atomic_set(&tgtp->xmt_abort_unsol, 0);
1280 atomic_set(&tgtp->xmt_abort_sol, 0);
1281 atomic_set(&tgtp->xmt_abort_rsp, 0);
1282 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1284 return error;
1288 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1290 struct lpfc_vport *vport = phba->pport;
1292 if (!phba->targetport)
1293 return 0;
1295 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1296 "6007 Update NVMET port %p did x%x\n",
1297 phba->targetport, vport->fc_myDID);
1299 phba->targetport->port_id = vport->fc_myDID;
1300 return 0;
1304 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1305 * @phba: pointer to lpfc hba data structure.
1306 * @axri: pointer to the nvmet xri abort wcqe structure.
1308 * This routine is invoked by the worker thread to process a SLI4 fast-path
1309 * NVMET aborted xri.
1311 void
1312 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1313 struct sli4_wcqe_xri_aborted *axri)
1315 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1316 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1317 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1318 struct lpfc_nvmet_tgtport *tgtp;
1319 struct lpfc_nodelist *ndlp;
1320 unsigned long iflag = 0;
1321 int rrq_empty = 0;
1322 bool released = false;
1324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1325 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1327 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1328 return;
1330 if (phba->targetport) {
1331 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1332 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1335 spin_lock_irqsave(&phba->hbalock, iflag);
1336 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1337 list_for_each_entry_safe(ctxp, next_ctxp,
1338 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1339 list) {
1340 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1341 continue;
1343 /* Check if we already received a free context call
1344 * and we have completed processing an abort situation.
1346 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1347 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1348 list_del(&ctxp->list);
1349 released = true;
1351 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1352 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1354 rrq_empty = list_empty(&phba->active_rrq_list);
1355 spin_unlock_irqrestore(&phba->hbalock, iflag);
1356 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1357 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1358 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1359 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1360 lpfc_set_rrq_active(phba, ndlp,
1361 ctxp->ctxbuf->sglq->sli4_lxritag,
1362 rxid, 1);
1363 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1366 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1367 "6318 XB aborted oxid %x flg x%x (%x)\n",
1368 ctxp->oxid, ctxp->flag, released);
1369 if (released)
1370 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1372 if (rrq_empty)
1373 lpfc_worker_wake_up(phba);
1374 return;
1376 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1377 spin_unlock_irqrestore(&phba->hbalock, iflag);
1381 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1382 struct fc_frame_header *fc_hdr)
1385 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1386 struct lpfc_hba *phba = vport->phba;
1387 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1388 struct nvmefc_tgt_fcp_req *rsp;
1389 uint16_t xri;
1390 unsigned long iflag = 0;
1392 xri = be16_to_cpu(fc_hdr->fh_ox_id);
1394 spin_lock_irqsave(&phba->hbalock, iflag);
1395 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1396 list_for_each_entry_safe(ctxp, next_ctxp,
1397 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1398 list) {
1399 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1400 continue;
1402 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1403 spin_unlock_irqrestore(&phba->hbalock, iflag);
1405 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1406 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1407 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1409 lpfc_nvmeio_data(phba,
1410 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1411 xri, smp_processor_id(), 0);
1413 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1414 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1416 rsp = &ctxp->ctx.fcp_req;
1417 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1419 /* Respond with BA_ACC accordingly */
1420 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1421 return 0;
1423 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1424 spin_unlock_irqrestore(&phba->hbalock, iflag);
1426 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1427 xri, smp_processor_id(), 1);
1429 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1430 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1432 /* Respond with BA_RJT accordingly */
1433 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1434 #endif
1435 return 0;
1438 void
1439 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1441 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1442 struct lpfc_nvmet_tgtport *tgtp;
1444 if (phba->nvmet_support == 0)
1445 return;
1446 if (phba->targetport) {
1447 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1448 init_completion(&tgtp->tport_unreg_done);
1449 nvmet_fc_unregister_targetport(phba->targetport);
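		/*
		 * Note: the timeout argument is in jiffies, so this waits at
		 * most 5 ticks for the targetport_delete callback to fire.
		 */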
1450 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1451 lpfc_nvmet_cleanup_io_context(phba);
1453 phba->targetport = NULL;
1454 #endif
1458 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1459 * @phba: pointer to lpfc hba data structure.
1460 * @pring: pointer to a SLI ring.
1461 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1463 * This routine processes an unsolicited NVME LS request received on the
1464 * unsolicited receive queue. It allocates a receive context for the
1465 * exchange and hands the LS payload to the nvmet transport via
1466 * nvmet_fc_rcv_ls_req(). If the request cannot be delivered, the buffer
1467 * is freed and an abort is issued for the exchange.
1470 static void
1471 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1472 struct hbq_dmabuf *nvmebuf)
1474 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1475 struct lpfc_nvmet_tgtport *tgtp;
1476 struct fc_frame_header *fc_hdr;
1477 struct lpfc_nvmet_rcv_ctx *ctxp;
1478 uint32_t *payload;
1479 uint32_t size, oxid, sid, rc;
1481 if (!nvmebuf || !phba->targetport) {
1482 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1483 "6154 LS Drop IO\n");
1484 oxid = 0;
1485 size = 0;
1486 sid = 0;
1487 ctxp = NULL;
1488 goto dropit;
1491 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1492 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1493 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1494 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1495 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1496 sid = sli4_sid_from_fc_hdr(fc_hdr);
1498 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1499 if (ctxp == NULL) {
1500 atomic_inc(&tgtp->rcv_ls_req_drop);
1501 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1502 "6155 LS Drop IO x%x: Alloc\n",
1503 oxid);
1504 dropit:
1505 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1506 "xri x%x sz %d from %06x\n",
1507 oxid, size, sid);
1508 if (nvmebuf)
1509 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1510 return;
1512 ctxp->phba = phba;
1513 ctxp->size = size;
1514 ctxp->oxid = oxid;
1515 ctxp->sid = sid;
1516 ctxp->wqeq = NULL;
1517 ctxp->state = LPFC_NVMET_STE_LS_RCV;
1518 ctxp->entry_cnt = 1;
1519 ctxp->rqb_buffer = (void *)nvmebuf;
1521 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
1522 oxid, size, sid);
1524 * The calling sequence should be:
1525 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1526 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1528 atomic_inc(&tgtp->rcv_ls_req_in);
1529 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1530 payload, size);
1532 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1533 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1534 "%08x %08x %08x\n", size, rc,
1535 *payload, *(payload+1), *(payload+2),
1536 *(payload+3), *(payload+4), *(payload+5));
1538 if (rc == 0) {
1539 atomic_inc(&tgtp->rcv_ls_req_out);
1540 return;
1543 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
1544 oxid, size, sid);
1546 atomic_inc(&tgtp->rcv_ls_req_drop);
1547 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1548 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1549 ctxp->oxid, rc);
1551 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1552 if (nvmebuf)
1553 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1555 atomic_inc(&tgtp->xmt_ls_abort);
1556 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1557 #endif
1560 static struct lpfc_nvmet_ctxbuf *
1561 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1562 struct lpfc_nvmet_ctx_info *current_infop)
1564 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1565 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1566 struct lpfc_nvmet_ctx_info *get_infop;
1567 int i;
1570 * The current_infop for the MRQ an NVME command IU was received
1571 * on is empty. Our goal is to replenish this MRQ's context
1572 * list from another CPU.
1574 * First we need to pick a context list to start looking on.
1575 * nvmet_ctx_start_cpu points to the list that had contexts available
1576 * the last time this CPU needed replenishing, while nvmet_ctx_next_cpu
1577 * is simply the next sequential CPU for this MRQ.
1579 if (current_infop->nvmet_ctx_start_cpu)
1580 get_infop = current_infop->nvmet_ctx_start_cpu;
1581 else
1582 get_infop = current_infop->nvmet_ctx_next_cpu;
1584 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1585 if (get_infop == current_infop) {
1586 get_infop = get_infop->nvmet_ctx_next_cpu;
1587 continue;
1589 spin_lock(&get_infop->nvmet_ctx_list_lock);
1591 /* Just take the entire context list, if there are any */
1592 if (get_infop->nvmet_ctx_list_cnt) {
1593 list_splice_init(&get_infop->nvmet_ctx_list,
1594 &current_infop->nvmet_ctx_list);
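			/*
			 * Set the count to one less than what was spliced;
			 * one context is handed out by list_remove_head()
			 * below.
			 */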
1595 current_infop->nvmet_ctx_list_cnt =
1596 get_infop->nvmet_ctx_list_cnt - 1;
1597 get_infop->nvmet_ctx_list_cnt = 0;
1598 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1600 current_infop->nvmet_ctx_start_cpu = get_infop;
1601 list_remove_head(&current_infop->nvmet_ctx_list,
1602 ctx_buf, struct lpfc_nvmet_ctxbuf,
1603 list);
1604 return ctx_buf;
1607 /* Otherwise, move on to the next CPU for this MRQ */
1608 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1609 get_infop = get_infop->nvmet_ctx_next_cpu;
1612 #endif
1613 /* Nothing found, all contexts for the MRQ are in-flight */
1614 return NULL;
1618 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1619 * @phba: pointer to lpfc hba data structure.
1620 * @idx: relative index of MRQ vector
1621 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1623 * This routine processes an unsolicited NVME FCP command received on an
1624 * MRQ. It obtains a free receive context (replenishing from another CPU's
1625 * list, or queueing the IO on the wait list if none is available),
1626 * initializes the context from the FC header, and hands the command
1627 * payload to the nvmet transport via nvmet_fc_rcv_fcp_req().
1630 static void
1631 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1632 uint32_t idx,
1633 struct rqb_dmabuf *nvmebuf,
1634 uint64_t isr_timestamp)
1636 struct lpfc_nvmet_rcv_ctx *ctxp;
1637 struct lpfc_nvmet_tgtport *tgtp;
1638 struct fc_frame_header *fc_hdr;
1639 struct lpfc_nvmet_ctxbuf *ctx_buf;
1640 struct lpfc_nvmet_ctx_info *current_infop;
1641 uint32_t *payload;
1642 uint32_t size, oxid, sid, rc, qno;
1643 unsigned long iflag;
1644 int current_cpu;
1645 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1646 uint32_t id;
1647 #endif
1649 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1650 return;
1652 ctx_buf = NULL;
1653 if (!nvmebuf || !phba->targetport) {
1654 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1655 "6157 NVMET FCP Drop IO\n");
1656 oxid = 0;
1657 size = 0;
1658 sid = 0;
1659 ctxp = NULL;
1660 goto dropit;
1664 * Get a pointer to the context list for this MRQ based on
1665 * the CPU this MRQ IRQ is associated with. If the CPU association
1666 * changes from our initial assumption, the context list could
1667 * be empty, thus it would need to be replenished with the
1668 * context list from another CPU for this MRQ.
1670 current_cpu = smp_processor_id();
1671 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1672 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1673 if (current_infop->nvmet_ctx_list_cnt) {
1674 list_remove_head(&current_infop->nvmet_ctx_list,
1675 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1676 current_infop->nvmet_ctx_list_cnt--;
1677 } else {
1678 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1680 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1682 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1683 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1684 size = nvmebuf->bytes_recv;
1686 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1687 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1688 id = smp_processor_id();
1689 if (id < LPFC_CHECK_CPU_CNT)
1690 phba->cpucheck_rcv_io[id]++;
1692 #endif
1694 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1695 oxid, size, smp_processor_id());
1697 if (!ctx_buf) {
1698 /* Queue this NVME IO to process later */
1699 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1700 list_add_tail(&nvmebuf->hbuf.list,
1701 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1702 phba->sli4_hba.nvmet_io_wait_cnt++;
1703 phba->sli4_hba.nvmet_io_wait_total++;
1704 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1705 iflag);
1707 /* Post a brand new DMA buffer to RQ */
1708 qno = nvmebuf->idx;
1709 lpfc_post_rq_buffer(
1710 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1711 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1712 return;
1715 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1716 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1717 sid = sli4_sid_from_fc_hdr(fc_hdr);
1719 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1720 if (ctxp->state != LPFC_NVMET_STE_FREE) {
1721 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1722 "6414 NVMET Context corrupt %d %d oxid x%x\n",
1723 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1725 ctxp->wqeq = NULL;
1726 ctxp->txrdy = NULL;
1727 ctxp->offset = 0;
1728 ctxp->phba = phba;
1729 ctxp->size = size;
1730 ctxp->oxid = oxid;
1731 ctxp->sid = sid;
1732 ctxp->idx = idx;
1733 ctxp->state = LPFC_NVMET_STE_RCV;
1734 ctxp->entry_cnt = 1;
1735 ctxp->flag = 0;
1736 ctxp->ctxbuf = ctx_buf;
1737 ctxp->rqb_buffer = (void *)nvmebuf;
1738 spin_lock_init(&ctxp->ctxlock);
1740 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1741 if (isr_timestamp) {
1742 ctxp->ts_isr_cmd = isr_timestamp;
1743 ctxp->ts_cmd_nvme = ktime_get_ns();
1744 ctxp->ts_nvme_data = 0;
1745 ctxp->ts_data_wqput = 0;
1746 ctxp->ts_isr_data = 0;
1747 ctxp->ts_data_nvme = 0;
1748 ctxp->ts_nvme_status = 0;
1749 ctxp->ts_status_wqput = 0;
1750 ctxp->ts_isr_status = 0;
1751 ctxp->ts_status_nvme = 0;
1752 } else {
1753 ctxp->ts_cmd_nvme = 0;
1755 #endif
1757 atomic_inc(&tgtp->rcv_fcp_cmd_in);
1759 * The calling sequence should be:
1760 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1761 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1762 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1763 * the NVME command / FC header is stored, so we are free to repost
1764 * the buffer.
1766 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1767 payload, size);
1769 /* Process FCP command */
1770 if (rc == 0) {
1771 ctxp->rqb_buffer = NULL;
1772 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1773 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1774 return;
1777 /* Processing of FCP command is deferred */
1778 if (rc == -EOVERFLOW) {
1779 lpfc_nvmeio_data(phba,
1780 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
1781 oxid, size, sid);
1782 /* defer reposting rcv buffer till .defer_rcv callback */
1783 ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
1784 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1785 return;
1787 ctxp->rqb_buffer = nvmebuf;
1789 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1790 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1791 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1792 ctxp->oxid, rc,
1793 atomic_read(&tgtp->rcv_fcp_cmd_in),
1794 atomic_read(&tgtp->rcv_fcp_cmd_out),
1795 atomic_read(&tgtp->xmt_fcp_release));
1796 dropit:
1797 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1798 oxid, size, sid);
1799 if (oxid) {
1800 lpfc_nvmet_defer_release(phba, ctxp);
1801 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1802 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1803 return;
1806 if (ctx_buf)
1807 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
1809 if (nvmebuf)
1810 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1814 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
1815 * @phba: pointer to lpfc hba data structure.
1816 * @pring: pointer to a SLI ring.
1817 * @nvmebuf: pointer to received nvme data structure.
1819 * This routine is used to process an unsolicited event received from a SLI
1820 * (Service Level Interface) ring. The actual processing of the data buffer
1821 * associated with the unsolicited event is done by invoking the routine
1822 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
1823 * SLI RQ on which the unsolicited event was received.
1824 **/
1825 void
1826 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1827 struct lpfc_iocbq *piocb)
1828 {
1829 struct lpfc_dmabuf *d_buf;
1830 struct hbq_dmabuf *nvmebuf;
1832 d_buf = piocb->context2;
1833 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1835 if (phba->nvmet_support == 0) {
1836 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1837 return;
1838 }
1839 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1840 }
1842 /**
1843 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1844 * @phba: pointer to lpfc hba data structure.
1845 * @idx: relative index of MRQ vector
1846 * @nvmebuf: pointer to received nvme data structure.
1847 *
1848 * This routine is used to process an unsolicited event received from a SLI
1849 * (Service Level Interface) ring. The actual processing of the data buffer
1850 * associated with the unsolicited event is done by invoking the routine
1851 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
1852 * SLI RQ on which the unsolicited event was received.
1853 **/
1854 void
1855 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1856 uint32_t idx,
1857 struct rqb_dmabuf *nvmebuf,
1858 uint64_t isr_timestamp)
1859 {
1860 if (phba->nvmet_support == 0) {
1861 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
1862 return;
1863 }
1864 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
1865 isr_timestamp);
1866 }
1868 /**
1869 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1870 * @phba: pointer to lpfc hba data structure.
1871 * @ctxp: Context info for NVME LS Request
1872 * @rspbuf: DMA buffer of NVME command.
1873 * @rspsize: size of the NVME command.
1874 *
1875 * This routine allocates a lpfc-WQE data structure from the driver
1876 * lpfc-WQE free-list and prepares it to transmit an NVME Link Service
1877 * (LS) response on the exchange described by @ctxp. The WQE is built
1878 * as an XMIT_SEQUENCE64 command: it fills in the Buffer Descriptor
1879 * Entry (BDE) that points to the response payload described by @rspbuf
1880 * and @rspsize, and it sets the receive exchange ID (OX_ID) taken from
1881 * the LS request context so the response is returned on the correct
1882 * exchange. The remote node (ndlp) is looked up from the source N_Port
1883 * ID saved in the context. The reference count on the ndlp is
1884 * incremented by 1 and the reference to the ndlp is put into context1
1885 * of the WQE data structure for this WQE to hold the ndlp reference
1886 * for the command's callback function to access later.
1887 *
1889 * Return code
1890 * Pointer to the newly allocated/prepared nvme wqe data structure
1891 * NULL - when nvme wqe data structure allocation/preparation failed
1892 **/
1893 static struct lpfc_iocbq *
1894 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1895 struct lpfc_nvmet_rcv_ctx *ctxp,
1896 dma_addr_t rspbuf, uint16_t rspsize)
1897 {
1898 struct lpfc_nodelist *ndlp;
1899 struct lpfc_iocbq *nvmewqe;
1900 union lpfc_wqe *wqe;
1902 if (!lpfc_is_link_up(phba)) {
1903 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1904 "6104 NVMET prep LS wqe: link err: "
1905 "NPORT x%x oxid:x%x ste %d\n",
1906 ctxp->sid, ctxp->oxid, ctxp->state);
1907 return NULL;
1908 }
1910 /* Allocate buffer for command wqe */
1911 nvmewqe = lpfc_sli_get_iocbq(phba);
1912 if (nvmewqe == NULL) {
1913 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1914 "6105 NVMET prep LS wqe: No WQE: "
1915 "NPORT x%x oxid x%x ste %d\n",
1916 ctxp->sid, ctxp->oxid, ctxp->state);
1917 return NULL;
1918 }
1920 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1921 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1922 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1923 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1924 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1925 "6106 NVMET prep LS wqe: No ndlp: "
1926 "NPORT x%x oxid x%x ste %d\n",
1927 ctxp->sid, ctxp->oxid, ctxp->state);
1928 goto nvme_wqe_free_wqeq_exit;
1929 }
1930 ctxp->wqeq = nvmewqe;
1932 /* prevent preparing wqe with NULL ndlp reference */
1933 nvmewqe->context1 = lpfc_nlp_get(ndlp);
1934 if (nvmewqe->context1 == NULL)
1935 goto nvme_wqe_free_wqeq_exit;
1936 nvmewqe->context2 = ctxp;
1938 wqe = &nvmewqe->wqe;
1939 memset(wqe, 0, sizeof(union lpfc_wqe));
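/*
 * The LS response is sent as a single XMIT_SEQUENCE64 WQE: one 64-bit
 * BDE points at the response payload, and the R_CTL/TYPE fields below
 * mark the frame as an NVME link service reply on the original
 * exchange (OX_ID).
 */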
1941 /* Words 0 - 2 */
1942 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1943 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
1944 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
1945 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
1947 /* Word 3 */
1949 /* Word 4 */
1951 /* Word 5 */
1952 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1953 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1954 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1955 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
1956 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1958 /* Word 6 */
1959 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
1960 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1961 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
1963 /* Word 7 */
1964 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
1965 CMD_XMIT_SEQUENCE64_WQE);
1966 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
1967 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
1968 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
1970 /* Word 8 */
1971 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
1973 /* Word 9 */
1974 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
1975 /* Needs to be set by caller */
1976 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
1978 /* Word 10 */
1979 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
1980 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1981 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
1982 LPFC_WQE_LENLOC_WORD12);
1983 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
1985 /* Word 11 */
1986 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
1987 LPFC_WQE_CQ_ID_DEFAULT);
1988 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
1989 OTHER_COMMAND);
1991 /* Word 12 */
1992 wqe->xmit_sequence.xmit_len = rspsize;
1994 nvmewqe->retry = 1;
1995 nvmewqe->vport = phba->pport;
1996 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1997 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
1999 /* Xmit NVMET response to remote NPORT <did> */
2000 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2001 "6039 Xmit NVMET LS response to remote "
2002 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2003 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2004 rspsize);
2005 return nvmewqe;
2007 nvme_wqe_free_wqeq_exit:
2008 nvmewqe->context2 = NULL;
2009 nvmewqe->context3 = NULL;
2010 lpfc_sli_release_iocbq(phba, nvmewqe);
2011 return NULL;
2012 }
2015 static struct lpfc_iocbq *
2016 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2017 struct lpfc_nvmet_rcv_ctx *ctxp)
2018 {
2019 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2020 struct lpfc_nvmet_tgtport *tgtp;
2021 struct sli4_sge *sgl;
2022 struct lpfc_nodelist *ndlp;
2023 struct lpfc_iocbq *nvmewqe;
2024 struct scatterlist *sgel;
2025 union lpfc_wqe128 *wqe;
2026 uint32_t *txrdy;
2027 dma_addr_t physaddr;
2028 int i, cnt;
2029 int xc = 1;
2031 if (!lpfc_is_link_up(phba)) {
2032 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2033 "6107 NVMET prep FCP wqe: link err:"
2034 "NPORT x%x oxid x%x ste %d\n",
2035 ctxp->sid, ctxp->oxid, ctxp->state);
2036 return NULL;
2037 }
2039 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2040 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2041 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2042 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2043 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2044 "6108 NVMET prep FCP wqe: no ndlp: "
2045 "NPORT x%x oxid x%x ste %d\n",
2046 ctxp->sid, ctxp->oxid, ctxp->state);
2047 return NULL;
2048 }
2050 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2051 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2052 "6109 NVMET prep FCP wqe: seg cnt err: "
2053 "NPORT x%x oxid x%x ste %d cnt %d\n",
2054 ctxp->sid, ctxp->oxid, ctxp->state,
2055 phba->cfg_nvme_seg_cnt);
2056 return NULL;
2057 }
2059 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2060 nvmewqe = ctxp->wqeq;
2061 if (nvmewqe == NULL) {
2062 /* Allocate buffer for command wqe */
2063 nvmewqe = ctxp->ctxbuf->iocbq;
2064 if (nvmewqe == NULL) {
2065 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2066 "6110 NVMET prep FCP wqe: No "
2067 "WQE: NPORT x%x oxid x%x ste %d\n",
2068 ctxp->sid, ctxp->oxid, ctxp->state);
2069 return NULL;
2070 }
2071 ctxp->wqeq = nvmewqe;
2072 xc = 0; /* create new XRI */
2073 nvmewqe->sli4_lxritag = NO_XRI;
2074 nvmewqe->sli4_xritag = NO_XRI;
2075 }
2077 /* Sanity check */
2078 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2079 (ctxp->entry_cnt == 1)) ||
2080 (ctxp->state == LPFC_NVMET_STE_DATA)) {
2081 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
2082 } else {
2083 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2084 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2085 ctxp->state, ctxp->entry_cnt);
2086 return NULL;
2087 }
2089 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
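/*
 * Build the WQE to match the transport operation: READDATA and
 * READDATA_RSP use FCP_TSEND64, WRITEDATA uses FCP_TRECEIVE64 with a
 * transfer-ready payload, and RSP uses FCP_TRSP64.
 */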
2090 switch (rsp->op) {
2091 case NVMET_FCOP_READDATA:
2092 case NVMET_FCOP_READDATA_RSP:
2093 /* Words 0 - 2 : The first sg segment */
2094 sgel = &rsp->sg[0];
2095 physaddr = sg_dma_address(sgel);
2096 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2097 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2098 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2099 wqe->fcp_tsend.bde.addrHigh =
2100 cpu_to_le32(putPaddrHigh(physaddr));
2102 /* Word 3 */
2103 wqe->fcp_tsend.payload_offset_len = 0;
2105 /* Word 4 */
2106 wqe->fcp_tsend.relative_offset = ctxp->offset;
2108 /* Word 5 */
2110 /* Word 6 */
2111 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2112 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2113 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2114 nvmewqe->sli4_xritag);
2116 /* Word 7 */
2117 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
2118 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
2120 /* Word 8 */
2121 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2123 /* Word 9 */
2124 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2125 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2127 /* Word 10 */
2128 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
2129 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
2130 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
2131 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
2132 LPFC_WQE_LENLOC_WORD12);
2133 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
2134 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
2135 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
2136 if (phba->cfg_nvme_oas)
2137 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
2139 /* Word 11 */
2140 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
2141 LPFC_WQE_CQ_ID_DEFAULT);
2142 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
2143 FCP_COMMAND_TSEND);
2145 /* Word 12 */
2146 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2148 /* Setup 2 SKIP SGEs */
2149 sgl->addr_hi = 0;
2150 sgl->addr_lo = 0;
2151 sgl->word2 = 0;
2152 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2153 sgl->word2 = cpu_to_le32(sgl->word2);
2154 sgl->sge_len = 0;
2155 sgl++;
2156 sgl->addr_hi = 0;
2157 sgl->addr_lo = 0;
2158 sgl->word2 = 0;
2159 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2160 sgl->word2 = cpu_to_le32(sgl->word2);
2161 sgl->sge_len = 0;
2162 sgl++;
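/*
 * Two SKIP SGEs come first; the data SGEs for this TSEND are appended
 * by the common loop at the end of this routine.
 */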
2163 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2164 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2165 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
2166 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
2167 (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN)) {
2168 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
2169 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
2170 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
2171 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
2172 } else {
2173 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2174 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2175 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2176 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2177 ((rsp->rsplen >> 2) - 1));
2178 memcpy(&wqe->words[16], rsp->rspaddr,
2179 rsp->rsplen);
2180 }
2181 } else {
2182 atomic_inc(&tgtp->xmt_fcp_read);
2184 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2185 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
2186 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
2187 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2188 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
2189 }
2190 break;
2192 case NVMET_FCOP_WRITEDATA:
2193 /* Words 0 - 2 : The first sg segment */
2194 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2195 GFP_KERNEL, &physaddr);
2196 if (!txrdy) {
2197 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2198 "6041 Bad txrdy buffer: oxid x%x\n",
2199 ctxp->oxid);
2200 return NULL;
2201 }
2202 ctxp->txrdy = txrdy;
2203 ctxp->txrdy_phys = physaddr;
2204 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2205 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2206 wqe->fcp_treceive.bde.addrLow =
2207 cpu_to_le32(putPaddrLow(physaddr));
2208 wqe->fcp_treceive.bde.addrHigh =
2209 cpu_to_le32(putPaddrHigh(physaddr));
2211 /* Word 3 */
2212 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
2214 /* Word 4 */
2215 wqe->fcp_treceive.relative_offset = ctxp->offset;
2217 /* Word 5 */
2219 /* Word 6 */
2220 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2221 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2222 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2223 nvmewqe->sli4_xritag);
2225 /* Word 7 */
2226 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
2227 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
2228 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
2229 CMD_FCP_TRECEIVE64_WQE);
2231 /* Word 8 */
2232 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2234 /* Word 9 */
2235 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2236 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2238 /* Word 10 */
2239 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
2240 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
2241 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
2242 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
2243 LPFC_WQE_LENLOC_WORD12);
2244 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
2245 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
2246 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
2247 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
2248 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
2249 if (phba->cfg_nvme_oas)
2250 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
2252 /* Word 11 */
2253 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
2254 LPFC_WQE_CQ_ID_DEFAULT);
2255 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
2256 FCP_COMMAND_TRECEIVE);
2257 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2259 /* Word 12 */
2260 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2262 /* Setup 1 TXRDY and 1 SKIP SGE */
2263 txrdy[0] = 0;
2264 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2265 txrdy[2] = 0;
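/*
 * The three words written above form the transfer-ready payload:
 * word 1 carries the burst length (the whole transfer here) while
 * words 0 and 2 remain zero.
 */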
2267 sgl->addr_hi = putPaddrHigh(physaddr);
2268 sgl->addr_lo = putPaddrLow(physaddr);
2269 sgl->word2 = 0;
2270 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2271 sgl->word2 = cpu_to_le32(sgl->word2);
2272 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2273 sgl++;
2274 sgl->addr_hi = 0;
2275 sgl->addr_lo = 0;
2276 sgl->word2 = 0;
2277 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2278 sgl->word2 = cpu_to_le32(sgl->word2);
2279 sgl->sge_len = 0;
2280 sgl++;
2281 atomic_inc(&tgtp->xmt_fcp_write);
2282 break;
2284 case NVMET_FCOP_RSP:
2285 /* Words 0 - 2 */
2286 physaddr = rsp->rspdma;
2287 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2288 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2289 wqe->fcp_trsp.bde.addrLow =
2290 cpu_to_le32(putPaddrLow(physaddr));
2291 wqe->fcp_trsp.bde.addrHigh =
2292 cpu_to_le32(putPaddrHigh(physaddr));
2294 /* Word 3 */
2295 wqe->fcp_trsp.response_len = rsp->rsplen;
2297 /* Word 4 */
2298 wqe->fcp_trsp.rsvd_4_5[0] = 0;
2301 /* Word 5 */
2303 /* Word 6 */
2304 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2305 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2306 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2307 nvmewqe->sli4_xritag);
2309 /* Word 7 */
2310 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
2311 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
2312 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
2314 /* Word 8 */
2315 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2317 /* Word 9 */
2318 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2319 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2321 /* Word 10 */
2322 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
2323 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
2324 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
2325 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
2326 LPFC_WQE_LENLOC_WORD3);
2327 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
2328 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
2329 if (phba->cfg_nvme_oas)
2330 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
2332 /* Word 11 */
2333 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
2334 LPFC_WQE_CQ_ID_DEFAULT);
2335 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
2336 FCP_COMMAND_TRSP);
2337 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2339 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2340 /* Good response - all zero's on wire */
2341 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
2342 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
2343 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
2344 } else {
2345 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2346 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2347 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2348 ((rsp->rsplen >> 2) - 1));
2349 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2350 }
2352 /* Use rspbuf, NOT sg list */
2353 rsp->sg_cnt = 0;
2354 sgl->word2 = 0;
2355 atomic_inc(&tgtp->xmt_fcp_rsp);
2356 break;
2358 default:
2359 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2360 "6064 Unknown Rsp Op %d\n",
2361 rsp->op);
2362 return NULL;
2363 }
2365 nvmewqe->retry = 1;
2366 nvmewqe->vport = phba->pport;
2367 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2368 nvmewqe->context1 = ndlp;
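/*
 * Append the data SGEs for TSEND/TRECEIVE (the RSP case forced sg_cnt
 * to 0); ctxp->offset accumulates the relative offset used by any
 * follow-on WQEs for the same exchange.
 */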
2370 for (i = 0; i < rsp->sg_cnt; i++) {
2371 sgel = &rsp->sg[i];
2372 physaddr = sg_dma_address(sgel);
2373 cnt = sg_dma_len(sgel);
2374 sgl->addr_hi = putPaddrHigh(physaddr);
2375 sgl->addr_lo = putPaddrLow(physaddr);
2376 sgl->word2 = 0;
2377 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2378 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2379 if ((i+1) == rsp->sg_cnt)
2380 bf_set(lpfc_sli4_sge_last, sgl, 1);
2381 sgl->word2 = cpu_to_le32(sgl->word2);
2382 sgl->sge_len = cpu_to_le32(cnt);
2383 sgl++;
2384 ctxp->offset += cnt;
2385 }
2386 ctxp->state = LPFC_NVMET_STE_DATA;
2387 ctxp->entry_cnt++;
2388 return nvmewqe;
2389 }
2391 /**
2392 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2393 * @phba: Pointer to HBA context object.
2394 * @cmdwqe: Pointer to driver command WQE object.
2395 * @wcqe: Pointer to driver response CQE object.
2396 *
2397 * The function is called from SLI ring event handler with no
2398 * lock held. This function is the completion handler for NVME ABTS for FCP
2399 * commands. The function frees memory resources used for the NVME commands.
2400 **/
2401 static void
2402 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2403 struct lpfc_wcqe_complete *wcqe)
2404 {
2405 struct lpfc_nvmet_rcv_ctx *ctxp;
2406 struct lpfc_nvmet_tgtport *tgtp;
2407 uint32_t status, result;
2408 unsigned long flags;
2409 bool released = false;
2411 ctxp = cmdwqe->context2;
2412 status = bf_get(lpfc_wcqe_c_status, wcqe);
2413 result = wcqe->parameter;
2415 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2416 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2417 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2419 ctxp->state = LPFC_NVMET_STE_DONE;
2421 /* Check if we already received a free context call
2422 * and we have completed processing an abort situation.
2423 */
2424 spin_lock_irqsave(&ctxp->ctxlock, flags);
2425 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2426 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2427 list_del(&ctxp->list);
2428 released = true;
2429 }
2430 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2431 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2432 atomic_inc(&tgtp->xmt_abort_rsp);
2434 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2435 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2436 "WCQE: %08x %08x %08x %08x\n",
2437 ctxp->oxid, ctxp->flag, released,
2438 wcqe->word0, wcqe->total_data_placed,
2439 result, wcqe->word3);
2441 cmdwqe->context2 = NULL;
2442 cmdwqe->context3 = NULL;
2443 /*
2444 * if transport has released ctx, then can reuse it. Otherwise,
2445 * will be recycled by transport release call.
2446 */
2447 if (released)
2448 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2450 /* This is the iocbq for the abort, not the command */
2451 lpfc_sli_release_iocbq(phba, cmdwqe);
2453 /* Since iaab/iaar are NOT set, there is no work left.
2454 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2455 * should have been called already.
2456 */
2457 }
2459 /**
2460 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2461 * @phba: Pointer to HBA context object.
2462 * @cmdwqe: Pointer to driver command WQE object.
2463 * @wcqe: Pointer to driver response CQE object.
2464 *
2465 * The function is called from SLI ring event handler with no
2466 * lock held. This function is the completion handler for NVME ABTS for FCP
2467 * commands. The function frees memory resources used for the NVME commands.
2468 **/
2469 static void
2470 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2471 struct lpfc_wcqe_complete *wcqe)
2472 {
2473 struct lpfc_nvmet_rcv_ctx *ctxp;
2474 struct lpfc_nvmet_tgtport *tgtp;
2475 unsigned long flags;
2476 uint32_t status, result;
2477 bool released = false;
2479 ctxp = cmdwqe->context2;
2480 status = bf_get(lpfc_wcqe_c_status, wcqe);
2481 result = wcqe->parameter;
2483 if (!ctxp) {
2484 /* if context is clear, related io already complete */
2485 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2486 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2487 wcqe->word0, wcqe->total_data_placed,
2488 result, wcqe->word3);
2489 return;
2490 }
2492 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2493 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2494 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2496 /* Sanity check */
2497 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2498 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2499 "6112 ABTS Wrong state:%d oxid x%x\n",
2500 ctxp->state, ctxp->oxid);
2501 }
2503 /* Check if we already received a free context call
2504 * and we have completed processing an abort situation.
2505 */
2506 ctxp->state = LPFC_NVMET_STE_DONE;
2507 spin_lock_irqsave(&ctxp->ctxlock, flags);
2508 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2509 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2510 list_del(&ctxp->list);
2511 released = true;
2512 }
2513 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2514 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2515 atomic_inc(&tgtp->xmt_abort_rsp);
2517 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2518 "6316 ABTS cmpl xri x%x flg x%x (%x) "
2519 "WCQE: %08x %08x %08x %08x\n",
2520 ctxp->oxid, ctxp->flag, released,
2521 wcqe->word0, wcqe->total_data_placed,
2522 result, wcqe->word3);
2524 cmdwqe->context2 = NULL;
2525 cmdwqe->context3 = NULL;
2526 /*
2527 * if transport has released ctx, then can reuse it. Otherwise,
2528 * will be recycled by transport release call.
2529 */
2530 if (released)
2531 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2533 /* Since iaab/iaar are NOT set, there is no work left.
2534 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2535 * should have been called already.
2536 */
2537 }
2539 /**
2540 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2541 * @phba: Pointer to HBA context object.
2542 * @cmdwqe: Pointer to driver command WQE object.
2543 * @wcqe: Pointer to driver response CQE object.
2544 *
2545 * The function is called from SLI ring event handler with no
2546 * lock held. This function is the completion handler for NVME ABTS for LS
2547 * commands. The function frees memory resources used for the NVME commands.
2548 **/
2549 static void
2550 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2551 struct lpfc_wcqe_complete *wcqe)
2552 {
2553 struct lpfc_nvmet_rcv_ctx *ctxp;
2554 struct lpfc_nvmet_tgtport *tgtp;
2555 uint32_t status, result;
2557 ctxp = cmdwqe->context2;
2558 status = bf_get(lpfc_wcqe_c_status, wcqe);
2559 result = wcqe->parameter;
2561 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2562 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2564 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2565 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2566 ctxp, wcqe->word0, wcqe->total_data_placed,
2567 result, wcqe->word3);
2569 if (!ctxp) {
2570 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2571 "6415 NVMET LS Abort No ctx: WCQE: "
2572 "%08x %08x %08x %08x\n",
2573 wcqe->word0, wcqe->total_data_placed,
2574 result, wcqe->word3);
2576 lpfc_sli_release_iocbq(phba, cmdwqe);
2577 return;
2578 }
2580 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2581 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2582 "6416 NVMET LS abort cmpl state mismatch: "
2583 "oxid x%x: %d %d\n",
2584 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2585 }
2587 cmdwqe->context2 = NULL;
2588 cmdwqe->context3 = NULL;
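/*
 * LS receive contexts are allocated per request (hence the kfree below),
 * so release the abort iocbq and free the context here instead of
 * returning it to the FCP context buffer pool.
 */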
2589 lpfc_sli_release_iocbq(phba, cmdwqe);
2590 kfree(ctxp);
2591 }
2593 static int
2594 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2595 struct lpfc_nvmet_rcv_ctx *ctxp,
2596 uint32_t sid, uint16_t xri)
2597 {
2598 struct lpfc_nvmet_tgtport *tgtp;
2599 struct lpfc_iocbq *abts_wqeq;
2600 union lpfc_wqe *wqe_abts;
2601 struct lpfc_nodelist *ndlp;
2603 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2604 "6067 ABTS: sid %x xri x%x/x%x\n",
2605 sid, xri, ctxp->wqeq->sli4_xritag);
2607 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2609 ndlp = lpfc_findnode_did(phba->pport, sid);
2610 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2611 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2612 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2613 atomic_inc(&tgtp->xmt_abort_rsp_error);
2614 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2615 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2616 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2618 /* No failure to an ABTS request. */
2619 return 0;
2620 }
2622 abts_wqeq = ctxp->wqeq;
2623 wqe_abts = &abts_wqeq->wqe;
2625 /*
2626 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2627 * that were initialized in lpfc_sli4_nvmet_alloc.
2628 */
2629 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
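/*
 * The unsolicited abort is sent as an XMIT_SEQUENCE64 WQE whose R_CTL
 * and TYPE mark it as a BLS ABTS; the OX_ID of the exchange being
 * aborted is placed in the receive-exchange field below.
 */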
2631 /* Word 5 */
2632 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2633 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2634 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2635 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2636 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2638 /* Word 6 */
2639 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2640 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2641 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2642 abts_wqeq->sli4_xritag);
2644 /* Word 7 */
2645 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2646 CMD_XMIT_SEQUENCE64_WQE);
2647 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2648 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2649 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2651 /* Word 8 */
2652 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2654 /* Word 9 */
2655 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2656 /* Needs to be set by caller */
2657 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2659 /* Word 10 */
2660 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2661 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2662 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2663 LPFC_WQE_LENLOC_WORD12);
2664 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2665 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2667 /* Word 11 */
2668 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2669 LPFC_WQE_CQ_ID_DEFAULT);
2670 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2671 OTHER_COMMAND);
2673 abts_wqeq->vport = phba->pport;
2674 abts_wqeq->context1 = ndlp;
2675 abts_wqeq->context2 = ctxp;
2676 abts_wqeq->context3 = NULL;
2677 abts_wqeq->rsvd2 = 0;
2678 /* hba_wqidx should already be setup from command we are aborting */
2679 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2680 abts_wqeq->iocb.ulpLe = 1;
2682 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2683 "6069 Issue ABTS to xri x%x reqtag x%x\n",
2684 xri, abts_wqeq->iotag);
2685 return 1;
2686 }
2688 static int
2689 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2690 struct lpfc_nvmet_rcv_ctx *ctxp,
2691 uint32_t sid, uint16_t xri)
2692 {
2693 struct lpfc_nvmet_tgtport *tgtp;
2694 struct lpfc_iocbq *abts_wqeq;
2695 union lpfc_wqe *abts_wqe;
2696 struct lpfc_nodelist *ndlp;
2697 unsigned long flags;
2698 int rc;
2700 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2701 if (!ctxp->wqeq) {
2702 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2703 ctxp->wqeq->hba_wqidx = 0;
2704 }
2706 ndlp = lpfc_findnode_did(phba->pport, sid);
2707 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2708 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2709 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2710 atomic_inc(&tgtp->xmt_abort_rsp_error);
2711 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2712 "6160 Drop ABORT - wrong NDLP state x%x.\n",
2713 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2715 /* No failure to an ABTS request. */
2716 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2717 return 0;
2718 }
2720 /* Issue ABTS for this WQE based on iotag */
2721 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2722 if (!ctxp->abort_wqeq) {
2723 atomic_inc(&tgtp->xmt_abort_rsp_error);
2724 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2725 "6161 ABORT failed: No wqeqs: "
2726 "xri: x%x\n", ctxp->oxid);
2727 /* No failure to an ABTS request. */
2728 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2729 return 0;
2730 }
2731 abts_wqeq = ctxp->abort_wqeq;
2732 abts_wqe = &abts_wqeq->wqe;
2733 ctxp->state = LPFC_NVMET_STE_ABORT;
2735 /* Announce entry to new IO submit field. */
2736 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2737 "6162 ABORT Request to rport DID x%06x "
2738 "for xri x%x x%x\n",
2739 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2741 /* If the hba is getting reset, this flag is set. It is
2742 * cleared when the reset is complete and rings reestablished.
2743 */
2744 spin_lock_irqsave(&phba->hbalock, flags);
2745 /* driver queued commands are in process of being flushed */
2746 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2747 spin_unlock_irqrestore(&phba->hbalock, flags);
2748 atomic_inc(&tgtp->xmt_abort_rsp_error);
2749 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2750 "6163 Driver in reset cleanup - flushing "
2751 "NVME Req now. hba_flag x%x oxid x%x\n",
2752 phba->hba_flag, ctxp->oxid);
2753 lpfc_sli_release_iocbq(phba, abts_wqeq);
2754 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2755 return 0;
2756 }
2758 /* Outstanding abort is in progress */
2759 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2760 spin_unlock_irqrestore(&phba->hbalock, flags);
2761 atomic_inc(&tgtp->xmt_abort_rsp_error);
2762 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2763 "6164 Outstanding NVME I/O Abort Request "
2764 "still pending on oxid x%x\n",
2765 ctxp->oxid);
2766 lpfc_sli_release_iocbq(phba, abts_wqeq);
2767 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2768 return 0;
2769 }
2771 /* Ready - mark outstanding as aborted by driver. */
2772 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
2774 /* WQEs are reused. Clear stale data and set key fields to
2775 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
2776 */
2777 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
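/*
 * A solicited abort uses an ABORT_XRI_CX WQE; abort_tag below carries
 * the XRI of the outstanding WQE being aborted.
 */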
2779 /* word 3 */
2780 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
2782 /* word 7 */
2783 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
2784 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
2786 /* word 8 - tell the FW to abort the IO associated with this
2787 * outstanding exchange ID.
2788 */
2789 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
2791 /* word 9 - this is the iotag for the abts_wqe completion. */
2792 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
2793 abts_wqeq->iotag);
2795 /* word 10 */
2796 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
2797 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
2799 /* word 11 */
2800 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
2801 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
2802 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
2804 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
2805 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
2806 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
2807 abts_wqeq->iocb_cmpl = NULL;
2808 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
2809 abts_wqeq->context2 = ctxp;
2810 abts_wqeq->vport = phba->pport;
2811 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2812 spin_unlock_irqrestore(&phba->hbalock, flags);
2813 if (rc == WQE_SUCCESS) {
2814 atomic_inc(&tgtp->xmt_abort_sol);
2815 return 0;
2816 }
2818 atomic_inc(&tgtp->xmt_abort_rsp_error);
2819 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2820 lpfc_sli_release_iocbq(phba, abts_wqeq);
2821 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2822 "6166 Failed ABORT issue_wqe with status x%x "
2823 "for oxid x%x.\n",
2824 rc, ctxp->oxid);
2825 return 1;
2826 }
2829 static int
2830 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2831 struct lpfc_nvmet_rcv_ctx *ctxp,
2832 uint32_t sid, uint16_t xri)
2833 {
2834 struct lpfc_nvmet_tgtport *tgtp;
2835 struct lpfc_iocbq *abts_wqeq;
2836 unsigned long flags;
2837 int rc;
2839 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2840 if (!ctxp->wqeq) {
2841 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2842 ctxp->wqeq->hba_wqidx = 0;
2843 }
2845 if (ctxp->state == LPFC_NVMET_STE_FREE) {
2846 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2847 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
2848 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2849 rc = WQE_BUSY;
2850 goto aerr;
2851 }
2852 ctxp->state = LPFC_NVMET_STE_ABORT;
2853 ctxp->entry_cnt++;
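/*
 * lpfc_nvmet_unsol_issue_abort() only builds the ABTS WQE in ctxp->wqeq;
 * it is queued to the FCP WQ below with the unsolicited-abort completion
 * handler attached.
 */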
2854 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2855 if (rc == 0)
2856 goto aerr;
2858 spin_lock_irqsave(&phba->hbalock, flags);
2859 abts_wqeq = ctxp->wqeq;
2860 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
2861 abts_wqeq->iocb_cmpl = NULL;
2862 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
2863 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2864 spin_unlock_irqrestore(&phba->hbalock, flags);
2865 if (rc == WQE_SUCCESS) {
2866 return 0;
2867 }
2869 aerr:
2870 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2871 atomic_inc(&tgtp->xmt_abort_rsp_error);
2872 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2873 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
2874 ctxp->oxid, rc);
2875 return 1;
2876 }
2878 static int
2879 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2880 struct lpfc_nvmet_rcv_ctx *ctxp,
2881 uint32_t sid, uint16_t xri)
2882 {
2883 struct lpfc_nvmet_tgtport *tgtp;
2884 struct lpfc_iocbq *abts_wqeq;
2885 union lpfc_wqe *wqe_abts;
2886 unsigned long flags;
2887 int rc;
2889 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
2890 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
2891 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
2892 ctxp->entry_cnt++;
2893 } else {
2894 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2895 "6418 NVMET LS abort state mismatch "
2896 "IO x%x: %d %d\n",
2897 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2898 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
2899 }
2901 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2902 if (!ctxp->wqeq) {
2903 /* Issue ABTS for this WQE based on iotag */
2904 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
2905 if (!ctxp->wqeq) {
2906 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2907 "6068 Abort failed: No wqeqs: "
2908 "xri: x%x\n", xri);
2909 /* No failure to an ABTS request. */
2910 kfree(ctxp);
2911 return 0;
2912 }
2913 }
2914 abts_wqeq = ctxp->wqeq;
2915 wqe_abts = &abts_wqeq->wqe;
2917 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
2918 rc = WQE_BUSY;
2919 goto out;
2920 }
2922 spin_lock_irqsave(&phba->hbalock, flags);
2923 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
2924 abts_wqeq->iocb_cmpl = NULL;
2925 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
2926 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
2927 spin_unlock_irqrestore(&phba->hbalock, flags);
2928 if (rc == WQE_SUCCESS) {
2929 atomic_inc(&tgtp->xmt_abort_unsol);
2930 return 0;
2931 }
2932 out:
2933 atomic_inc(&tgtp->xmt_abort_rsp_error);
2934 abts_wqeq->context2 = NULL;
2935 abts_wqeq->context3 = NULL;
2936 lpfc_sli_release_iocbq(phba, abts_wqeq);
2937 kfree(ctxp);
2938 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2939 "6056 Failed to Issue ABTS. Status x%x\n", rc);
2940 return 0;
2941 }