/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */
	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */
	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
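/*
 * The three templates above are initialized once, before any NVMET I/O is
 * issued.  The intent (visible from the "is variable" annotations) is that
 * the per-I/O WQE-preparation code copies a template and then patches only
 * the variable words (BDE, relative offset, tags, request/exchange IDs and
 * lengths) rather than rebuilding the whole 128-byte WQE with bf_set()
 * calls for every request.  The prep routines themselves are declared at
 * the top of this file and defined later.
 */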
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
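/*
 * Note: lpfc_nvmet_defer_release() does not free anything itself.  It only
 * marks the context with LPFC_NVMET_CTX_RLS and moves it from the
 * t_active_ctx_list to the lpfc_abts_nvmet_ctx_list; the final repost back
 * to a free list is done later by the abort / XRI-aborted handling through
 * lpfc_nvmet_ctxbuf_post().
 */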
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}
/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned*/
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
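/*
 * Summary of lpfc_nvmet_ctxbuf_post(): if another command is already parked
 * on the nvmet_io_wait_list, the context being freed is immediately rebound
 * to that waiting command and handed to the deferred-work path
 * (ctx_buf->defer_work); only when nothing is waiting is the context
 * returned to the per-CPU/per-MRQ free list selected by the current CPU and
 * the MRQ index the I/O arrived on.
 */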
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;

	if (ctxp->ts_nvme_status <= ctxp->ts_data_nvme)
		return;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
		}
	}
#endif
}
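/*
 * On an error completion with the exchange-busy bit set, the context is
 * flagged LPFC_NVMET_XBUSY above and cannot be recycled until the firmware
 * reports the XRI aborted; that is handled in lpfc_sli4_nvmet_xri_aborted()
 * further down in this file.
 */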
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = raw_smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (rsp->hwqid != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6705 CPU Check OP: "
						"cpu %d expect %d\n",
						id, rsp->hwqid);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
		}
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}
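/*
 * When lpfc_sli4_issue_wqe() reports the work queue is full, the prepared
 * WQE is parked on wq->wqfull_list and HBA_NVMET_WQFULL is set; the queued
 * entries are replayed by lpfc_nvmet_wqfull_process() once WQE slots are
 * released, or failed back to the transport by lpfc_nvmet_wqfull_flush().
 */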
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
		 ctxp->state != LPFC_NVMET_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
			 ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv	= lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
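/*
 * Several fields of lpfc_tgttemplate (max_sgl_segments, max_hw_queues and
 * target_features) are overwritten with HBA-specific values in
 * lpfc_nvmet_create_targetport() just before the template is passed to
 * nvmet_fc_register_targetport().
 */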
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}
*phba
)
1262 struct lpfc_nvmet_ctx_info
*infop
;
1265 /* The first context list, MRQ 0 CPU 0 */
1266 infop
= phba
->sli4_hba
.nvmet_ctx_info
;
1270 /* Cycle the the entire CPU context list for every MRQ */
1271 for (i
= 0; i
< phba
->cfg_nvmet_mrq
; i
++) {
1272 for_each_present_cpu(j
) {
1273 infop
= lpfc_get_ctx_list(phba
, j
, i
);
1274 __lpfc_nvmet_clean_io_for_cpu(phba
, infop
);
1277 kfree(phba
->sli4_hba
.nvmet_ctx_info
);
1278 phba
->sli4_hba
.nvmet_ctx_info
= NULL
;
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}
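/*
 * lpfc_get_ctx_list(phba, cpu, mrq) indexes the flat nvmet_ctx_info array
 * allocated above (num_possible_cpu * cfg_nvmet_mrq entries); the helper
 * itself is defined in the driver's SLI4 header and is not shown in this
 * file.
 */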
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport  *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del_init(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
				ctxp->ctxbuf->sglq->sli4_lxritag,
				rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		ctxp->state = LPFC_NVMET_STE_ABORT;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		req = &ctxp->ctx.fcp_req;
		if (req)
			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
	}
#endif
}
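/*
 * lpfc_sli4_nvmet_xri_aborted() looks for the aborted XRI in two places:
 * first on the deferred-release (lpfc_abts_nvmet_ctx_list) entries, where a
 * pending LPFC_NVMET_CTX_RLS allows the context to be reposted, and then on
 * the active-context list via lpfc_nvmet_get_ctx_for_xri(), where the
 * transport is simply notified of the abort.
 */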
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t oxid, xri;
	unsigned long iflag = 0;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* check the wait list */
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		struct rqb_dmabuf *nvmebuf;
		struct fc_frame_header *fc_hdr_tmp;
		u32 sid_tmp;
		u16 oxid_tmp;
		bool found = false;

		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

		/* match by oxid and s_id */
		list_for_each_entry(nvmebuf,
				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
				    hbuf.list) {
			fc_hdr_tmp = (struct fc_frame_header *)
					(nvmebuf->hbuf.virt);
			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
			if (oxid_tmp != oxid || sid_tmp != sid)
				continue;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6321 NVMET Rcv ABTS oxid x%x from x%x "
					"is waiting for a ctxp\n",
					oxid, sid);

			list_del_init(&nvmebuf->hbuf.list);
			phba->sli4_hba.nvmet_io_wait_cnt--;
			found = true;
			break;
		}
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* free buffer since already posted a new DMA buffer to RQ */
		if (found) {
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			/* Respond with BA_ACC accordingly */
			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
			return 0;
		}
	}

	/* check active list */
	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
	if (ctxp) {
		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
				"flag x%x state x%x\n",
				ctxp->oxid, xri, ctxp->flag, ctxp->state);

		if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
			/* Notify the transport */
			nvmet_fc_rcv_fcp_abort(phba->targetport,
					       &ctxp->ctx.fcp_req);
		} else {
			cancel_work_sync(&ctxp->ctxbuf->defer_work);
			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);

		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
			 oxid, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}
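/*
 * An unsolicited ABTS is matched against three sources in order: contexts
 * already parked for deferred release, raw commands still waiting for a
 * context on the nvmet_io_wait_list, and finally the active-context list.
 * A match is answered with BA_ACC; if the exchange is unknown the frame is
 * rejected with BA_RJT.
 */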
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		}

		list_del(&nvmewqeq->list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
		spin_lock_irqsave(&pring->ring_lock, iflags);
	}

	if (!ctxp)
		wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
		if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			if (ctxp->ts_cmd_nvme) {
				if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
					ctxp->ts_status_wqput = ktime_get_ns();
				else
					ctxp->ts_data_wqput = ktime_get_ns();
			}
#endif
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
#endif
}
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	uint32_t qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].io_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6179 Unreg targetport x%px timeout "
					"reached.\n", phba->targetport);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}
1930 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1931 * @phba: pointer to lpfc hba data structure.
1932 * @pring: pointer to a SLI ring.
1933 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1935 * This routine is used for processing the WQE associated with a unsolicited
1936 * event. It first determines whether there is an existing ndlp that matches
1937 * the DID from the unsolicited WQE. If not, it will create a new one with
1938 * the DID from the unsolicited WQE. The ELS command from the unsolicited
1939 * WQE is then used to invoke the proper routine and to set up proper state
1940 * of the discovery state machine.
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
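
/**
 * lpfc_nvmet_process_rcv_fcp_req - Pass a received FCP command to nvmet-fc
 * @ctx_buf: pointer to the receive context buffer holding the command.
 *
 * Hands the command IU to the transport with nvmet_fc_rcv_fcp_req(). On
 * success the receive buffer is reposted (unless the context was reused or
 * the buffer already deferred); -EOVERFLOW defers the command and posts a
 * replacement RQ buffer; any other error drops the IO and aborts the
 * exchange.
 **/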
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6324 IO oxid x%x aborted\n",
				ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	ctxp->flag |= LPFC_NVMET_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * the NVME command / FC header is stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to RQ and defer
		 * freeing rcv buffer till .defer_rcv callback
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}
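
/**
 * lpfc_nvmet_fcp_rqst_defer_work - Deferred-work wrapper for FCP receive
 * @work: embedded work_struct of the owning lpfc_nvmet_ctxbuf.
 *
 * Runs lpfc_nvmet_process_rcv_fcp_req() from workqueue context for commands
 * whose processing was deferred off the CQ handler.
 **/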
static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}
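
/**
 * lpfc_nvmet_replenish_context - Refill an empty per-CPU context list
 * @phba: pointer to lpfc hba data structure.
 * @current_infop: context list info for the MRQ/CPU that ran empty.
 *
 * Walks the other CPUs' context lists for this MRQ, splices the first
 * non-empty list into @current_infop, and returns one context buffer from
 * it. Returns NULL if every context for the MRQ is currently in-flight.
 **/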
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's list.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu had available context the last time
	 * we needed to replenish this CPU, where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}

/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine processes an unsolicited NVME FCP command IU received on an
 * MRQ. It obtains a receive context from the per-CPU context list for the
 * MRQ (replenishing the list from another CPU if needed), initializes the
 * context from the FC header, and hands the command to the nvmet transport,
 * either inline or via deferred work depending on the CQ load indicated by
 * @cqflag.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp,
			    uint8_t cqflag)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = raw_smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		if (current_cpu < LPFC_CHECK_CPU_CNT) {
			if (idx != current_cpu)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6703 CPU Check rcv: "
						"cpu %d expect %d\n",
						current_cpu, idx);
			phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
		}
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, raw_smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp)
		ctxp->ts_isr_cmd = isr_timestamp;
	ctxp->ts_cmd_nvme = 0;
	ctxp->ts_nvme_data = 0;
	ctxp->ts_data_wqput = 0;
	ctxp->ts_isr_data = 0;
	ctxp->ts_data_nvme = 0;
	ctxp->ts_nvme_status = 0;
	ctxp->ts_status_wqput = 0;
	ctxp->ts_isr_status = 0;
	ctxp->ts_status_nvme = 0;
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* check for cq processing load */
	if (!cqflag) {
		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
		return;
	}

	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6325 Unable to queue work for oxid x%x. "
				"FCP Drop IO [x%x x%x x%x]\n",
				ctxp->oxid,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
	}
}

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the driver iocb carrying the received nvme data buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"3015 LS Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp,
			   uint8_t cqflag)
{
	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"3167 NVMET FCP Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine allocates a lpfc WQE data structure from the driver's iocb
 * free list and prepares an XMIT_SEQUENCE64 WQE that carries the NVME LS
 * response described by @rspbuf/@rspsize back to the initiator. It fills in
 * the Buffer Descriptor Entry (BDE) for the response payload and the common
 * WQE words. The reference count on the ndlp is incremented by 1 and the
 * ndlp reference is stored in context1 of the WQE data structure so the
 * command's completion handler can access it later.
 *
 * Return codes:
 *	Pointer to the newly allocated/prepared nvme wqe data structure
 *	NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
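
/**
 * lpfc_nvmet_prep_fcp_wqe - Build the WQE for an NVMET FCP operation
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context of the exchange being serviced.
 *
 * Initializes the ctxbuf's iocbq from the tsend/treceive/trsp command
 * templates according to ctxp->ctx.fcp_req.op (READDATA[_RSP], WRITEDATA
 * or RSP), sets up the SGL (two SKIP SGEs followed by the data SGEs and an
 * optional PBDE), and returns the prepared iocbq, or NULL on error.
 **/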
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	int i, cnt;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - set wqes later, in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
		wqe->fcp_treceive.bde.addrLow = 0;
		wqe->fcp_treceive.bde.addrHigh = 0;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - set pbde later */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the treceive template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for LS cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
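
/**
 * lpfc_nvmet_unsol_issue_abort - Build an ABTS BLS WQE for an exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context of the exchange to abort.
 * @sid: S_ID of the remote port.
 * @xri: exchange identifier to abort.
 *
 * Prepares (but does not submit) an XMIT_SEQUENCE64 WQE carrying a BA_ABTS
 * for the given exchange. Returns 1 when the WQE was prepared, 0 when the
 * ndlp is not in a usable state and the abort is silently dropped.
 **/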
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
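
/**
 * lpfc_nvmet_sol_fcp_issue_abort - Abort an outstanding NVMET FCP WQE
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context of the exchange to abort.
 * @sid: S_ID of the remote port.
 * @xri: exchange identifier to abort.
 *
 * Allocates a separate abort iocbq, builds an abort WQE against the
 * outstanding command's XRI, and submits it on the same hardware queue.
 * Returns 0 when the abort was issued (or intentionally dropped), 1 on a
 * submission failure.
 **/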
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	u8 opt;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVMET_STE_ABORT;
	opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
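
/**
 * lpfc_nvmet_unsol_ls_issue_abort - ABTS an NVME LS exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: LS receive context of the exchange to abort.
 * @sid: S_ID of the remote port.
 * @xri: exchange identifier to abort.
 *
 * Validates the LS context state, allocates an iocbq if the context does
 * not already own one, and issues the ABTS with
 * lpfc_nvmet_xmt_ls_abort_cmp() as the completion handler.
 **/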
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);