/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
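
/*
 * lpfc_nvmet_cmd_template() below initializes the three command templates
 * once at driver load; the I/O paths can then reuse the pre-built common
 * WQE bit fields and only need to fill in the per-I/O fields (the "is
 * variable" words noted in the comments that follow).
 */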
/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
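/*
 * lpfc_nvmet_get_ctx_for_xri - look up an active async exchange context by
 * its SGL XRI. Walks phba->sli4_hba.t_active_ctx_list under
 * t_active_list_lock and returns the matching context, or NULL.
 */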
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;
	return NULL;
}
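
/*
 * lpfc_nvmet_get_ctx_for_oxid - look up an active async exchange context by
 * OX_ID and S_ID. Same walk as above, but matching on the originator
 * exchange id and source id taken from the FC header.
 */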
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;
	return NULL;
}
#endif
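
/*
 * lpfc_nvmet_defer_release - defer freeing of an exchange context.
 * Marks the context LPFC_NVME_CTX_RLS and moves it from the active context
 * list to the ABTS context list so the abort path performs the real
 * release. Caller must hold ctxp->ctxlock.
 */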
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. The function frees memory resources used for the command
 * used to send the NVME LS RSP.
 **/
static void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function updates any states and statistics, then calls the
 * generic completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (status) {
		atomic_inc(&tgtp->xmt_ls_rsp_error);
		if (result == IOERR_ABORT_REQUESTED)
			atomic_inc(&tgtp->xmt_ls_rsp_aborted);
		if (bf_get(lpfc_wcqe_c_xb, wcqe))
			atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
	} else {
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: ctx buffer context
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned*/
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6181 Unable to queue deferred work "
					"FCP Drop IO [x%x x%x x%x]\n",
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;

		atomic_inc(&tgtp->xmt_fcp_rsp_error);
		if (result == IOERR_ABORT_REQUESTED)
			atomic_inc(&tgtp->xmt_fcp_rsp_aborted);

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVME_XBUSY;
			logerr |= LOG_NVME_ABTS;
			atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
		} else {
			ctxp->flag &= ~LPFC_NVME_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);
	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		ctxp->state = LPFC_NVME_STE_DONE;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}

/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
 *         an NVME LS rsp for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The response is for a prior NVME LS request that was
 * received and posted to the transport.
 *
 * Returns:
 *  0 : if response successfully transmit
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
int
__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		       struct nvmefc_ls_rsp *ls_rsp,
		       void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
					      struct lpfc_iocbq *cmdwqe,
					      struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_hba *phba = axchg->phba;
	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);

	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6412 NVMEx LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
		return -EALREADY;
	}
	axchg->state = LPFC_NVME_STE_LS_RSP;
	axchg->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
					  ls_rsp->rsplen);
	if (nvmewqeq == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
				axchg->oxid);
		rc = -ENOMEM;
		goto out_free_buf;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = ls_rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */

	nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = axchg;

	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

	/* clear to be sure there's no reference */
	nvmewqeq->context3 = NULL;

	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
			axchg->oxid, rc);

	rc = -ENXIO;

	lpfc_nlp_put(nvmewqeq->context1);

out_free_buf:
	/* Give back resources */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/*
	 * As transport doesn't track completions of responses, if the rsp
	 * fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
	return rc;
}

/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that NVME LS is to be transmit from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 *
 * Driver registers this routine to transmit responses for received NVME
 * LS requests.
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The ls_rsp is used to reverse-map the LS to the original
 * NVME LS request sequence, which provides addressing information for
 * the remote port the LS to be sent to, as well as the exchange id
 * that the LS is bound to.
 *
 * Returns:
 *  0 : if response successfully transmit
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);

	if (rc) {
		atomic_inc(&nvmep->xmt_ls_drop);
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&nvmep->xmt_ls_abort);
		return rc;
	}

	atomic_inc(&nvmep->xmt_ls_rsp);
	return 0;
}
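
/*
 * lpfc_nvmet_xmt_fcp_op - transport entry point to start (or continue) an
 * NVME FCP I/O operation (TSEND/TRECEIVE/TRSP). Builds the WQE for the
 * requested op and posts it to the hardware queue; if the WQ is full the
 * WQE is parked on the wqfull_list to be retried later.
 */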
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVME_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}
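
/*
 * lpfc_nvmet_targetport_delete - called by the nvmet-fc transport when the
 * targetport has been fully deleted; wakes any thread waiting on the
 * unregister completion.
 */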
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}
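
/*
 * lpfc_nvmet_xmt_fcp_abort - transport entry point to abort an outstanding
 * NVME FCP I/O. Depending on the exchange state, the abort is issued as a
 * solicited or unsolicited ABTS, and any WQE parked on the wqfull_list for
 * this I/O is flushed first.
 */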
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}
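
/*
 * lpfc_nvmet_xmt_fcp_release - transport entry point invoked when the
 * transport is done with an I/O context. If an abort or XBUSY condition is
 * still pending, the release is deferred to the abort path; otherwise the
 * context buffer is reposted for reuse.
 */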
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVME_STE_DONE &&
		 ctxp->state != LPFC_NVME_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVME_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
			 ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
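
/*
 * lpfc_nvmet_defer_rcv - transport callback indicating a previously
 * deferred receive has now been accepted. The original RQ buffer that was
 * held for the deferred command is returned to the firmware since a
 * replacement buffer was already reposted.
 */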
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the completion handler for NVME LS requests.
 * The function updates any states and statistics, then calls the
 * generic completion handler to finish completion of the request.
 **/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		      struct lpfc_wcqe_complete *wcqe)
{
	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ls_req - Issue a Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *               Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value:
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
		  void *hosthandle,
		  struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;
	u32 hstate;

	if (!lpfc_nvmet)
		return -EINVAL;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return -EINVAL;

	hstate = atomic_read(&lpfc_nvmet->state);
	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
		return -EACCES;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
				 lpfc_nvmet_ls_req_cmp);

	return ret;
}

/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: Transport targetport, that LS was issued from.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *               Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
		    void *hosthandle,
		    struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}
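
/*
 * lpfc_nvmet_host_release - transport callback invoked when the transport
 * no longer references the hosthandle (ndlp); clears the invalid-host
 * state kept in the targetport private data.
 */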
static void
lpfc_nvmet_host_release(void *hosthandle)
{
	struct lpfc_nodelist *ndlp = hosthandle;
	struct lpfc_hba *phba = NULL;
	struct lpfc_nvmet_tgtport *tgtp;

	phba = ndlp->phba;
	if (!phba->targetport || !phba->targetport->private)
		return;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6202 NVMET XPT releasing hosthandle x%px\n",
			hosthandle);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, 0);
}
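
/*
 * lpfc_nvmet_discovery_event - transport callback for an NVME subsystem
 * change; sends an RSCN to notify the fabric of the change.
 */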
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv      = lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,
	.ls_req         = lpfc_nvmet_ls_req,
	.ls_abort       = lpfc_nvmet_ls_abort,
	.host_release   = lpfc_nvmet_host_release,

	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
	.lsrqst_priv_sz = 0,
};

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}
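
/*
 * lpfc_nvmet_cleanup_io_context - release all per-CPU/MRQ NVMET context
 * buffers and the context info array allocated by
 * lpfc_nvmet_setup_io_context().
 */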
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle through the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}
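
/*
 * lpfc_nvmet_setup_io_context - allocate the per-CPU/per-MRQ context lists
 * and one context buffer (iocbq, SGL and exchange context) per NVMET XRI,
 * spreading the buffers evenly across the MRQ context lists.
 */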
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVME_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}
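
/*
 * lpfc_nvmet_create_targetport - allocate the NVMET I/O context pool and
 * register this HBA as an nvmet-fc target port, then zero the per-port
 * statistics counters.
 */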
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}
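
/*
 * lpfc_nvmet_update_targetport - refresh the registered targetport's
 * port_id after the vport's FC DID has changed.
 */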
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
			list_del_init(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVME_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		ctxp->state = LPFC_NVME_STE_ABORT;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		req = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, req);
	}
#endif
}
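
/*
 * lpfc_nvmet_rcv_unsol_abort - handle an unsolicited ABTS received for an
 * NVMET exchange. Matches the ABTS against the ABTS context list, the
 * deferred wait list and the active context list, notifies the transport
 * where needed, and responds with BA_ACC or BA_RJT.
 */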
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t oxid, xri;
	unsigned long iflag = 0;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* check the wait list */
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		struct rqb_dmabuf *nvmebuf;
		struct fc_frame_header *fc_hdr_tmp;
		u32 sid_tmp;
		u16 oxid_tmp;
		bool found = false;

		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

		/* match by oxid and s_id */
		list_for_each_entry(nvmebuf,
				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
				    hbuf.list) {
			fc_hdr_tmp = (struct fc_frame_header *)
					(nvmebuf->hbuf.virt);
			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
			if (oxid_tmp != oxid || sid_tmp != sid)
				continue;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6321 NVMET Rcv ABTS oxid x%x from x%x "
					"is waiting for a ctxp\n",
					oxid, sid);

			list_del_init(&nvmebuf->hbuf.list);
			phba->sli4_hba.nvmet_io_wait_cnt--;
			found = true;
			break;
		}
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* free buffer since already posted a new DMA buffer to RQ */
		if (found) {
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			/* Respond with BA_ACC accordingly */
			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
			return 0;
		}
	}

	/* check active list */
	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
	if (ctxp) {
		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
				"flag x%x state x%x\n",
				ctxp->oxid, xri, ctxp->flag, ctxp->state);

		if (ctxp->flag & LPFC_NVME_TNOTIFY) {
			/* Notify the transport */
			nvmet_fc_rcv_fcp_abort(phba->targetport,
					       &ctxp->hdlrctx.fcp_req);
		} else {
			cancel_work_sync(&ctxp->ctxbuf->defer_work);
			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);

		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
			 oxid, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}
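/*
 * Note: the ABTS handler above resolves an incoming abort by checking three
 * places in order -- the abts_nvmet context list, the nvmet_io_wait_list of
 * commands still waiting for a free receive context, and finally the active
 * context list -- and answers with BA_ACC when a match is found, otherwise
 * with BA_RJT.
 */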
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		} else {
			/* Flush all IOs */
			list_del(&nvmewqeq->list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
			spin_lock_irqsave(&pring->ring_lock, iflags);
		}
	}
	if (!ctxp)
		wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
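/*
 * Note: lpfc_nvmet_wqfull_flush() never touches the hardware. It completes
 * queued WQEs through lpfc_nvmet_xmt_fcp_op_cmp() with a faked
 * IOSTAT_LOCAL_REJECT / IOERR_ABORT_REQUESTED WCQE, either for one specific
 * exchange (ctxp != NULL) or for everything parked on the wqfull_list.
 */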
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
		if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			if (ctxp->ts_cmd_nvme) {
				if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
					ctxp->ts_status_wqput = ktime_get_ns();
				else
					ctxp->ts_data_wqput = ktime_get_ns();
			}
#endif
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
#endif
}
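/*
 * Note: lpfc_nvmet_wqfull_process() drains the deferred wqfull_list once WQE
 * slots free up again; if lpfc_sli4_issue_wqe() indicates the WQ is still
 * full, the entry appears to be put back at the head of the list and the
 * routine gives up until the next completion clears more slots.
 */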
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	u32 qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].io_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		if (!wait_for_completion_timeout(&tport_unreg_cmp,
					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6179 Unreg targetport x%px timeout "
					"reached.\n", phba->targetport);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}
/**
 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
 * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 **/
int
lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
	uint32_t *payload = axchg->payload;
	int rc;

	atomic_inc(&tgtp->rcv_ls_req_in);

	/*
	 * Driver passes the ndlp as the hosthandle argument allowing
	 * the transport to generate LS requests for any associations
	 * assigned to the node/hosthandle.
	 */
	rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
				 axchg->payload, axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return 0;
	}

	atomic_inc(&tgtp->rcv_ls_req_drop);
#endif
	return 1;
}
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	int rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6324 IO oxid x%x aborted\n",
				ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	ctxp->flag |= LPFC_NVME_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * the NVME command / FC header is stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to RQ and defer
		 * freeing rcv buffer till .defer_rcv callback
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}
static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}
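/*
 * Note: lpfc_nvmet_fcp_rqst_defer_work() is the workqueue half of the
 * cqflag-based deferral in lpfc_nvmet_unsol_fcp_buffer(): when CQ processing
 * is heavily loaded, the received command is handed to phba->wq and
 * lpfc_nvmet_process_rcv_fcp_req() runs later in process context.
 */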
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ an NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's list.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu had available context the last time
	 * we needed to replenish this CPU, where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}
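/*
 * Note: context buffers are kept in per-CPU lists per MRQ. Replenishment
 * above steals the donor CPU's entire list in one list_splice_init() (the
 * caller immediately takes one buffer off the head) rather than moving
 * buffers one at a time, presumably to keep lock hold times short on the
 * hot receive path.
 */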
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * FCP command IU. It obtains a receive context for the exchange (deferring
 * the IU to a wait list if none is available), initializes the context from
 * the FC header, and then hands the command to the nvmet-fc transport,
 * either directly or via a deferred work item depending on @cqflag.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp,
			    uint8_t cqflag)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = raw_smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
		if (idx != current_cpu)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6703 CPU Check rcv: "
					"cpu %d expect %d\n",
					current_cpu, idx);
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, raw_smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (ctxp->state != LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVME_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp)
		ctxp->ts_isr_cmd = isr_timestamp;
	ctxp->ts_cmd_nvme = 0;
	ctxp->ts_nvme_data = 0;
	ctxp->ts_data_wqput = 0;
	ctxp->ts_isr_data = 0;
	ctxp->ts_data_nvme = 0;
	ctxp->ts_nvme_status = 0;
	ctxp->ts_status_wqput = 0;
	ctxp->ts_isr_status = 0;
	ctxp->ts_status_nvme = 0;
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* check for cq processing load */
	if (!cqflag) {
		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
		return;
	}

	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6325 Unable to queue work for oxid x%x. "
				"FCP Drop IO [x%x x%x x%x]\n",
				ctxp->oxid,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
	}
}
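/*
 * Note: when no context buffer is available above, the command IU is parked
 * on lpfc_nvmet_io_wait_list and a fresh DMA buffer is posted to the RQ so
 * the hardware queue does not starve. The parked IU is picked up again when
 * a context is returned to the pool, or is answered directly from
 * lpfc_nvmet_rcv_unsol_abort() if the initiator aborts it first.
 */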
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
 * the SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp,
			   uint8_t cqflag)
{
	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3167 NVMET FCP Drop IO\n");
		return;
	}

	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating a lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and preparing the WQE with the parameters
 * passed into the routine so that an NVME LS response sequence can be
 * issued. It fills in the Buffer Descriptor Entry (BDE) for the response
 * payload. The reference count on the ndlp is incremented by 1 and the
 * reference to the ndlp is put into context1 of the WQE data structure for
 * this WQE to hold the ndlp reference for the command's callback function
 * to access later.
 *
 * Returns:
 *	Pointer to the newly allocated/prepared nvme wqe data structure
 *	NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 3 */

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
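/*
 * Note: the LS response is carried in an XMIT_SEQUENCE64 WQE addressed by
 * the RPI of the originating NPort (FC_RCTL_ELS4_REP, FC_TYPE_NVME); on any
 * failure after the iocbq allocation, the routine nulls the WQE context
 * pointers and releases the iocbq via the nvme_wqe_free_wqeq_exit label.
 */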
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	int i, cnt, nsegs;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}
	nsegs = rsp->sg_cnt;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVME_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVME_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - set wqes later, in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
		wqe->fcp_treceive.bde.addrLow = 0;
		wqe->fcp_treceive.bde.addrHigh = 0;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - set pbde later */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the treceive template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		nsegs = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for_each_sg(rsp->sg, sgel, nsegs, i) {
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15  (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVME_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
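/*
 * Note: the three FCP shapes map onto three WQE templates -- FCP_TSEND for
 * READDATA/READDATA_RSP, FCP_TRECEIVE for WRITEDATA and FCP_TRSP for the
 * response -- with the per-IO fields (XRI, RPI, oxid, relative offset,
 * lengths and the SGL/PBDE) patched over the template words copied at the
 * top of each case.
 */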
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVME_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVME_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVME_STE_DONE;
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (phba->nvmet_support) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_async_xchg_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
/**
 * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
 * @pwqeq: Pointer to command iocb.
 * @xritag: Tag that uniquely identifies the local exchange resource.
 * @opt: Option bits -
 *		bit 0 = inhibit sending abts on the link
 *
 * This function is called with hbalock held.
 **/
static void
lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
{
	union lpfc_wqe128 *wqe = &pwqeq->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(wqe, 0, sizeof(*wqe));

	if (opt & INHIBIT_ABORT)
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
	/* Abort specified xri tag, with the mask deliberately zeroed */
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);

	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* Abort the I/O associated with this outstanding exchange ID. */
	wqe->abort_cmd.wqe_com.abort_tag = xritag;

	/* iotag for the wqe completion. */
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);

	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}
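/*
 * Note: a rough sketch of how the issue path below uses this helper (the
 * exact locking and error handling live in lpfc_nvmet_sol_fcp_issue_abort()):
 *
 *	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
 *	lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
 *	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
 *	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 *
 * INHIBIT_ABORT keeps the adapter from putting another ABTS on the wire when
 * the exchange is being cleaned up because an ABTS was already received.
 */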
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	u8 opt;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVME_STE_ABORT;
	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
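/*
 * Note: the unsolicited abort path above differs from the solicited one in
 * that it reuses the exchange's own iocbq to send an ABTS BLS sequence via
 * lpfc_nvmet_unsol_issue_abort(), instead of allocating a separate
 * ABORT_XRI_CX WQE; on failure it reposts the context buffer itself when the
 * transport has already released the context.
 */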
/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async frame receive where the frame is not handled.
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the asynchronously received sequence
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value for the ABTS (other side's exchange id).
 **/
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}
/**
 * lpfc_nvmet_invalidate_host
 *
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 **/
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* Need to get the nvmet_fc_target_port pointer here.*/
	nvmet_fc_invalidate_host(phba->targetport, ndlp);