/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
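/* Look up the receive context, if any, whose XRI matches the given
 * exchange on the driver's active NVMET context list.
 */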
static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	return found ? ctxp : NULL;
}
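/* Look up the receive context, if any, that matches the given OXID and
 * source ID on the driver's active NVMET context list.
 */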
static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	return found ? ctxp : NULL;
}
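/* Flag a context for deferred release and move it from the active context
 * list to the aborted-buffer list; the abort path performs the real release.
 * Caller must hold ctxp->ctxlock.
 */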
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}
/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: context buffer to clean up and repost
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
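/* Accumulate per-segment latency statistics (min/max/total) for a completed
 * NVMET IO from the timestamps recorded along the command/response path.
 */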
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum = 0;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;

	if (!seg6)
		return;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);
	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}
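/* nvmet_fc transport entry point: transmit an LS response for a previously
 * received NVME LS request on this exchange.
 */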
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}
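/* nvmet_fc transport entry point: issue the next FCP operation (TSEND,
 * TRECEIVE or TRSP) for an outstanding NVME command.
 */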
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}
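/* nvmet_fc transport entry point: abort an outstanding FCP operation,
 * coordinating with any abort already in progress in the firmware.
 */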
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}
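/* nvmet_fc transport entry point: release an IO context back to the driver
 * once the transport is done with it, deferring if an abort is still active.
 */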
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
		 ctxp->state != LPFC_NVMET_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
			 ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
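/* nvmet_fc transport entry point: the transport has deferred this command,
 * so free the RQ buffer now that a replacement has already been reposted.
 */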
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	int rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv      = lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}
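/* Allocate the per-CPU/per-MRQ context lists and one receive context (with
 * its iocbq and SGL) for every NVMET XRI, distributing them across the MRQs.
 */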
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport  *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}
void
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
}
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del_init(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		ctxp->state = LPFC_NVMET_STE_ABORT;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		req = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, req);
	}
#endif
}
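/* Handle an unsolicited ABTS received for an NVMET exchange: match it against
 * the aborted-context list, the waiting-buffer list and the active list, then
 * respond with BA_ACC or BA_RJT as appropriate.
 */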
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t oxid, xri;
	unsigned long iflag = 0;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* check the wait list */
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		struct rqb_dmabuf *nvmebuf;
		struct fc_frame_header *fc_hdr_tmp;
		u32 sid_tmp;
		u16 oxid_tmp;
		bool found = false;

		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

		/* match by oxid and s_id */
		list_for_each_entry(nvmebuf,
				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
				    hbuf.list) {
			fc_hdr_tmp = (struct fc_frame_header *)
					(nvmebuf->hbuf.virt);
			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
			if (oxid_tmp != oxid || sid_tmp != sid)
				continue;
			found = true;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6321 NVMET Rcv ABTS oxid x%x from x%x "
					"is waiting for a ctxp\n",
					oxid, sid);

			list_del_init(&nvmebuf->hbuf.list);
			phba->sli4_hba.nvmet_io_wait_cnt--;
			break;
		}
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* free buffer since already posted a new DMA buffer to RQ */
		if (found) {
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			/* Respond with BA_ACC accordingly */
			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
			return 0;
		}
	}

	/* check active list */
	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
	if (ctxp) {
		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
				"flag x%x state x%x\n",
				ctxp->oxid, xri, ctxp->flag, ctxp->state);

		if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
			/* Notify the transport */
			nvmet_fc_rcv_fcp_abort(phba->targetport,
					       &ctxp->ctx.fcp_req);
		} else {
			cancel_work_sync(&ctxp->ctxbuf->defer_work);
			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);

		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
			 oxid, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		}
		/* Flush all IOs */
		list_del(&nvmewqeq->list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
		spin_lock_irqsave(&pring->ring_lock, iflags);
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
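/* Re-issue WQEs that were queued on the wqfull_list once WQ slots become
 * available again.
 */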
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
		if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			if (ctxp->ts_cmd_nvme) {
				if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
					ctxp->ts_status_wqput = ktime_get_ns();
				else
					ctxp->ts_data_wqput = ktime_get_ns();
			}
#endif
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
#endif
}
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	uint32_t qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].io_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6179 Unreg targetport x%px timeout "
					"reached.\n", phba->targetport);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}
/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
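/* Deliver a received FCP command IU to the nvmet-fc transport.  A return of
 * -EOVERFLOW means the transport deferred the command, so a replacement RQ
 * buffer is posted and the receive buffer is held until the .defer_rcv
 * callback; any other error aborts and releases the exchange.
 */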
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6324 IO oxid x%x aborted\n",
				ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	ctxp->flag |= LPFC_NVMET_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * the NVME command / FC header is stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to RQ and defer
		 * freeing rcv buffer till .defer_rcv callback
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}
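/* Work-queue handler used when command delivery is pushed off the CQ
 * processing path; it simply resumes lpfc_nvmet_process_rcv_fcp_req()
 * from process context.
 */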
static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}
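/* The per-CPU context list for this MRQ is empty; try to steal the whole
 * free-context list from another CPU's pool for the same MRQ.  Returns one
 * context buffer on success or NULL if every context is in flight.
 */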
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ on which an NVME command IU was
	 * received is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's list.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu had an available context the last time
	 * we needed to replenish this CPU, where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up the proper
 * state of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp,
			    uint8_t cqflag)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = raw_smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
		if (idx != current_cpu)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6703 CPU Check rcv: "
					"cpu %d expect %d\n",
					current_cpu, idx);
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, raw_smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp)
		ctxp->ts_isr_cmd = isr_timestamp;
	ctxp->ts_cmd_nvme = 0;
	ctxp->ts_nvme_data = 0;
	ctxp->ts_data_wqput = 0;
	ctxp->ts_isr_data = 0;
	ctxp->ts_data_nvme = 0;
	ctxp->ts_nvme_status = 0;
	ctxp->ts_status_wqput = 0;
	ctxp->ts_isr_status = 0;
	ctxp->ts_status_nvme = 0;
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* check for cq processing load */
	if (!cqflag) {
		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
		return;
	}

	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6325 Unable to queue work for oxid x%x. "
				"FCP Drop IO [x%x x%x x%x]\n",
				ctxp->oxid,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
	}
}
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"3015 LS Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: in jiffies.
 * @cqflag: cq processing information regarding workload.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp,
			   uint8_t cqflag)
{
	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"3167 NVMET FCP Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare an lpfc wqe data structure
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA address of the NVME LS response buffer.
 * @rspsize: size of the NVME LS response.
 *
 * This routine allocates an lpfc-WQE data structure from the driver lpfc-WQE
 * free list and prepares the WQE with the parameters passed into the routine,
 * so the discovery state machine can issue an Extended Link Service (NVME)
 * command. It is a generic lpfc-WQE allocation and preparation routine used
 * by all the discovery state machine routines; the NVME command-specific
 * fields are set up later by the individual discovery machine routines after
 * this routine has allocated and prepared a generic WQE data structure. It
 * fills in the Buffer Descriptor Entries (BDEs) and allocates buffers for
 * both the command payload and the response payload (if expected). The
 * reference count on the ndlp is incremented by 1, and the reference to the
 * ndlp is put into context1 of the WQE data structure for this WQE to hold
 * the ndlp reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	/* release the queued WQE */
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
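/* Build the FCP_TSEND64 / FCP_TRECEIVE64 / FCP_TRSP64 WQE for the transport
 * operation carried in ctxp->ctx.fcp_req.  Words are seeded from the static
 * command templates, then the data SGL (and optional PBDE) is filled from
 * the request scatterlist.
 */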
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	int i, cnt, nsegs;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}
	nsegs = rsp->sg_cnt;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - set wqes later, in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
		wqe->fcp_treceive.bde.addrLow = 0;
		wqe->fcp_treceive.bde.addrHigh = 0;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - set pbde later */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		nsegs = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for_each_sg(rsp->sg, sgel, nsegs, i) {
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for LS cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
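/* Build an ABTS (sent as an XMIT_SEQUENCE64 WQE with R_CTL BA_ABTS and type
 * BLS) in ctxp->wqeq for the exchange identified by sid/xri.  Returns 1 if
 * the WQE was prepared and may be submitted, 0 if the abort was dropped
 * because the remote node is not logged in.
 */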
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
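/* Abort a solicited FCP exchange: allocate a separate abort iocbq, build the
 * abort WQE with lpfc_nvme_prep_abort_wqe() and submit it on the same
 * hardware WQ as the command being aborted.
 */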
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	u8 opt;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVMET_STE_ABORT;
	opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
	}

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
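/* Abort an NVME LS exchange: validate/advance the LS state machine, obtain
 * an iocbq for the ABTS and submit it with lpfc_nvmet_xmt_ls_abort_cmp as
 * the completion handler.
 */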
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);