// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;
static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
				   struct qla_qpair *qp,
				   struct qla_nvme_lsrjt_pt_arg *a,
				   bool is_xchg_terminate);

struct qla_nvme_unsol_ctx {
	struct list_head elem;
	struct scsi_qla_host *vha;
	struct fc_port *fcport;
	struct srb *sp;
	struct nvmefc_ls_rsp lsrsp;
	struct nvmefc_ls_rsp *fd_rsp;
	struct work_struct lsrsp_work;
	struct work_struct abort_work;
	__le32 exchange_address;
	__le16 nport_handle;
	__le16 ox_id;
	int comp_status;
	spinlock_t cmd_lock;
};
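
/*
 * Register an fcport with the FC-NVMe transport as a remote port.
 * The port's PRLI service parameters decide which NVMe roles
 * (initiator/target/discovery) are advertised; registration is
 * skipped when host NVMe is disabled or the port is already
 * registered.
 */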
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (qla_nvme_register_hba(vha))
		return 0;

	if (!vha->nvme_local_port)
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = fcport->dev_loss_tmo;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
				       fcport->dev_loss_tmo);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		       "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		       "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			    "Returning existing qpair of %p for idx=%x\n",
			    *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			    "Failed to allocate qpair\n");
			return -EINVAL;
		}
		qla_adjust_iocb_limit(vha);
	}
	*handle = qpair;

	return 0;
}
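
/*
 * Final kref release for an NVMe FCP command: map the driver
 * completion status to an NVMe transport status, invoke the
 * transport's done() callback, and return the srb to its qpair.
 */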
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct qla_nvme_unsol_ctx *uctx = sp->priv;
	struct nvmefc_ls_rsp *fd_rsp;
	unsigned long flags;

	if (!uctx) {
		qla2x00_rel_sp(sp);
		return;
	}

	spin_lock_irqsave(&uctx->cmd_lock, flags);
	uctx->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&uctx->cmd_lock, flags);

	fd_rsp = uctx->fd_rsp;

	list_del(&uctx->elem);

	fd_rsp->done(fd_rsp);
	kfree(uctx);
	qla2x00_rel_sp(sp);
}

static void qla_nvme_lsrsp_complete(struct work_struct *work)
{
	struct qla_nvme_unsol_ctx *uctx =
		container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);

	kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
}

static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
{
	struct qla_nvme_unsol_ctx *uctx = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	uctx->comp_status = res;
	INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
	schedule_work(&uctx->lsrsp_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}
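
/*
 * Deferred abort handler.  Runs from the workqueue with an extra kref
 * held on the srb (taken before the work was scheduled); issues the
 * ABTS via isp_ops->abort_command() and drops that kref unless the
 * abort completion path is expected to release it.
 */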
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;
	bool io_wait_for_abort_done;
	uint32_t handle;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc,
	       fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	/*
	 * sp may not be valid after abort_command if return code is either
	 * SUCCESS or ERR_FROM_FW codes, so cache the value here.
	 */
	io_wait_for_abort_done = ql2xabts_wait_nvme &&
					QLA_ABTS_WAIT_ENABLED(sp);
	handle = sp->handle;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, handle, fcport, rval);

	/*
	 * If async tmf is enabled, the abort callback is called only on
	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
	 */
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;

	/*
	 * Returned before decreasing kref so that I/O requests
	 * are waited until ABTS complete. This kref is decreased
	 * at qla24xx_abort_sp_done function.
	 */
	if (abts_done_called && io_wait_for_abort_done)
		return;
out:
	/* kref_get was done before work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
			       struct nvme_fc_remote_port *rport,
			       struct nvmefc_ls_rsp *fd_resp)
{
	struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
				struct qla_nvme_unsol_ctx, lsrsp);
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct scsi_qla_host *vha = uctx->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_nvme_lsrjt_pt_arg a;
	struct srb_iocb *nvme;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	uint8_t cnt = 0;

	if (!fcport || fcport->deleted)
		goto out;

	if (!ha->flags.fw_started)
		goto out;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto out;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_lsrsp_done;
	sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
	sp->priv = (void *)uctx;
	uctx->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&uctx->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	uctx->fd_rsp = fd_resp;
	nvme->u.nvme.desc = fd_resp;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.timeout_sec = 0;
	nvme->u.nvme.cmd_dma = fd_resp->rspdma;
	nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
	nvme->u.nvme.rsp_len = 0;
	nvme->u.nvme.rsp_dma = 0;
	nvme->u.nvme.exchange_address = uctx->exchange_address;
	nvme->u.nvme.nport_handle = uctx->nport_handle;
	nvme->u.nvme.ox_id = uctx->ox_id;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
				   fd_resp->rsplen, DMA_TO_DEVICE);

	ql_dbg(ql_dbg_unsol, vha, 0x2122,
	       "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
	       fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
	       uctx->ox_id, uctx->nport_handle);

retry:
	rval = qla2x00_start_sp(sp);
	switch (rval) {
	case QLA_SUCCESS:
		break;
	case EAGAIN:
		msleep(PURLS_MSLEEP_INTERVAL);
		cnt++;
		if (cnt < PURLS_RETRY_COUNT)
			goto retry;

		fallthrough;
	default:
		ql_dbg(ql_log_warn, vha, 0x2123,
		       "Failed to xmit Unsol ls response = %d\n", rval);
		rval = -EIO;
		qla2x00_rel_sp(sp);
		goto out;
	}

	return 0;
out:
	memset((void *)&a, 0, sizeof(a));
	a.vp_idx = vha->vp_idx;
	a.nport_handle = uctx->nport_handle;
	a.xchg_address = uctx->exchange_address;
	qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
	kfree(uctx);
	return rval;
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
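
/*
 * nvme_fc ls_req() entry point: build an SRB_NVME_LS srb around the
 * transport's LS request descriptor, sync the request buffer for the
 * device, and start the srb on the default queue.
 */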
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
	nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = fd->rqstdma;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
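
/*
 * Build a Command Type NVME IOCB (plus any Continuation Type 1 IOCBs
 * needed for the scatter/gather list) on the qpair's request ring and
 * ring the firmware doorbell.  The qpair lock is acquired here, not by
 * the caller.
 */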
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t        avail_dsds;
	struct dsd64	*cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t        rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
	sp->iores.exch_cnt = 1;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
		rval = -EBUSY;
		goto queuing_error;
	}

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
				rval = -EBUSY;
				goto queuing_error;
			}
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
				(sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CNMD IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* ignore nvme async cmd due to long timeout */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	if (rval)
		qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}
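
/*
 * nvme_fc fcp_io() entry point: validate rport/qpair state, allocate a
 * qpair srb for the request, and queue it via qla2x00_start_nvme_mq().
 */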
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;
	ha = vha->hw;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still sending
	 * IO's return busy back to stall the IO Q. This happens when the
	 * link goes away and fw hasn't notified us yet, but IO's are being
	 * returned. If the dev comes back quickly we won't exhaust the IO
	 * retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	qpair = qla_mapq_nvme_select_qpair(ha, qpair);

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
		struct blk_mq_queue_map *map)
{
	struct scsi_qla_host *vha = lport->private;

	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}
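
/*
 * Operations and limits advertised to the FC-NVMe transport when the
 * local port is registered; max_hw_queues and dma_boundary are
 * adjusted at registration time in qla_nvme_register_hba().
 */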
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.xmt_ls_rsp	= qla_nvme_xmt_ls_rsp,
	.map_queues	= qla_nvme_map_queues,
	.max_hw_queues  = DEF_NVME_HW_QUEUES,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}
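
/*
 * Register this host as an FC-NVMe local port.  Clamps ql2xnvme_queues
 * to the supported minimum and the available qpair count before
 * publishing the template, and uses the vport mutex to guard against
 * racing registrations.
 */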
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
		    ql2xnvme_queues, (ha->max_qpairs - 1),
		    (ha->max_qpairs - 1));
		ql2xnvme_queues = ((ha->max_qpairs - 1));
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(ql2xnvme_queues),
		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

	ql_log(ql_log_info, vha, 0xfffb,
	       "Number of NVME queues used for this port: %d\n",
	       qla_nvme_fc_transport.max_hw_queues);

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	mutex_lock(&ha->vport_lock);
	/*
	 * Check again for nvme_local_port to see if any other thread raced
	 * with this one and finished registration.
	 */
	if (!vha->nvme_local_port) {
		ql_log(ql_log_info, vha, 0xffff,
		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

		ret = nvme_fc_register_localport(&pinfo, tmpl,
						 get_device(&ha->pdev->dev),
						 &vha->nvme_local_port);
		mutex_unlock(&ha->vport_lock);
	} else {
		mutex_unlock(&ha->vport_lock);
		return 0;
	}
	if (ret)
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	else
		vha->nvme_local_port->private = vha;

	return ret;
}
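
/*
 * Tune the Abort IOCB for NVMe when ABTS-wait is enabled: request a
 * driver-specified ABTS retry count and a response timeout of
 * 2 * R_A_TOV.
 */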
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	uint16_t comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	/* N_Port handle is not currently logged in */
	case CS_TIMEOUT:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_UNAVAILABLE:
	/* Firmware found that the port name changed */
	case CS_PORT_LOGGED_OUT:
	/* BA_RJT was received for the ABTS */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl,
		       abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		       "IOCB request is completed successfully comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		       "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}
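
/*
 * Fill an FC-NVMe LS_RJT payload (per the FC-NVMe LS descriptor
 * format) with the given reason code, explanation, and vendor byte.
 */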
static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
				   u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
	rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
	rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	rjt->rqst.desc_len =
		fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	rjt->rqst.w0.ls_cmd = ls_cmd;
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;
}

static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
				   struct pt_ls4_request *lsrjt_iocb,
				   struct qla_nvme_lsrjt_pt_arg *a)
{
	lsrjt_iocb->entry_type = PT_LS4_REQUEST;
	lsrjt_iocb->entry_count = 1;
	lsrjt_iocb->sys_define = 0;
	lsrjt_iocb->entry_status = 0;
	lsrjt_iocb->handle = QLA_SKIP_HANDLE;
	lsrjt_iocb->nport_handle = a->nport_handle;
	lsrjt_iocb->exchange_address = a->xchg_address;
	lsrjt_iocb->vp_index = a->vp_idx;

	lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);

	put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
	lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
	lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
	lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);

	put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
	lsrjt_iocb->dsd[1].length = 0;
	lsrjt_iocb->rx_dseg_count = 0;
	lsrjt_iocb->rx_byte_count = 0;
}

static int
qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
			struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
{
	struct pt_ls4_request *lsrjt_iocb;

	lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
	if (!lsrjt_iocb) {
		ql_log(ql_log_warn, vha, 0x210e,
		       "qla2x00_alloc_iocbs failed.\n");
		return QLA_FUNCTION_FAILED;
	}

	if (!is_xchg_terminate) {
		qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
				       a->reason, a->explanation, 0);

		a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
		a->tx_addr = vha->hw->lsrjt.cdma;
		a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;

		ql_dbg(ql_dbg_unsol, vha, 0x211f,
		       "Sending nvme fc ls reject ox_id %04x op %04x\n",
		       a->ox_id, a->opcode);
		ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
			       vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
	} else {
		a->tx_byte_count = 0;
		a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
		ql_dbg(ql_dbg_unsol, vha, 0x2110,
		       "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
	}

	qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
	/* flush iocb to mem before notifying hw doorbell */
	wmb();
	qla2x00_start_iocbs(vha, qp->req);
	return 0;
}

/**
 * qla2xxx_process_purls_pkt() - Pass up an unsolicited received FC-NVMe
 * Link Service packet to nvme_fc_rcv_ls_req().
 * The LLDD must provide memory for the response buffer, which is used
 * to reference the exchange corresponding to the LS when issuing an LS
 * response, and must free that buffer in lport->ops->xmt_ls_rsp().
 *
 * @vha: SCSI qla host
 * @item: ptr to purex_item
 */
static void
qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
{
	struct qla_nvme_unsol_ctx *uctx = item->purls_context;
	struct qla_nvme_lsrjt_pt_arg a;
	int ret = 1;

#if (IS_ENABLED(CONFIG_NVME_FC))
	ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
				 &item->iocb, item->size);
#endif
	if (ret) {
		ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
		memset((void *)&a, 0, sizeof(a));
		a.vp_idx = vha->vp_idx;
		a.nport_handle = uctx->nport_handle;
		a.xchg_address = uctx->exchange_address;
		qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
		list_del(&uctx->elem);
		kfree(uctx);
	}
}
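
/*
 * Resolve the vp_index carried in a received IOCB to its scsi_qla_host:
 * returns the base host when vp_index is 0 and no vports exist, the
 * matching vport from ha->vp_list otherwise, or NULL on no match.
 */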
static scsi_qla_host_t *
qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
{
	scsi_qla_host_t *base_vha, *vha, *tvp;
	unsigned long flags;

	base_vha = pci_get_drvdata(ha->pdev);

	if (!vp_index && !ha->num_vhosts)
		return base_vha;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
		if (vha->vp_idx == vp_index) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return NULL;
}
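
/*
 * Handle a PURLS (unsolicited FC-NVMe LS) IOCB from the response queue:
 * locate the session, stash the exchange context in a
 * qla_nvme_unsol_ctx, and queue the packet for
 * qla2xxx_process_purls_pkt(); on any failure an LS reject is
 * transmitted instead.
 */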
void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
{
	struct nvme_fc_remote_port *rport;
	struct qla_nvme_rport *qla_rport;
	struct qla_nvme_lsrjt_pt_arg a;
	struct pt_ls4_rx_unsol *p = *pkt;
	struct qla_nvme_unsol_ctx *uctx;
	struct rsp_que *rsp_q = *rsp;
	struct qla_hw_data *ha;
	scsi_qla_host_t *vha;
	fc_port_t *fcport = NULL;
	struct purex_item *item;
	port_id_t d_id = {0};
	port_id_t id = {0};
	u8 *opcode;
	bool xmt_reject = false;

	ha = rsp_q->hw;

	vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
	if (!vha) {
		ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
		WARN_ON_ONCE(1);
		return;
	}

	memset((void *)&a, 0, sizeof(a));
	opcode = (u8 *)&p->payload[0];
	a.opcode = opcode[3];
	a.vp_idx = p->vp_index;
	a.nport_handle = p->nport_handle;
	a.ox_id = p->ox_id;
	a.xchg_address = p->exchange_address;

	id.b.domain = p->s_id.domain;
	id.b.area   = p->s_id.area;
	id.b.al_pa  = p->s_id.al_pa;
	d_id.b.domain = p->d_id[2];
	d_id.b.area   = p->d_id[1];
	d_id.b.al_pa  = p->d_id[0];

	fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
	if (!fcport) {
		ql_dbg(ql_dbg_unsol, vha, 0x211e,
		       "Failed to find sid=%06x did=%06x\n",
		       id.b24, d_id.b24);
		a.reason = FCNVME_RJT_RC_INV_ASSOC;
		a.explanation = FCNVME_RJT_EXP_NONE;
		xmt_reject = true;
		goto out;
	}
	rport = fcport->nvme_remote_port;
	qla_rport = rport->private;

	item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
	if (!item) {
		a.reason = FCNVME_RJT_RC_LOGIC;
		a.explanation = FCNVME_RJT_EXP_NONE;
		xmt_reject = true;
		goto out;
	}

	uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
	if (!uctx) {
		ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n");
		a.reason = FCNVME_RJT_RC_LOGIC;
		a.explanation = FCNVME_RJT_EXP_NONE;
		xmt_reject = true;
		kfree(item);
		goto out;
	}

	uctx->vha = vha;
	uctx->fcport = fcport;
	uctx->exchange_address = p->exchange_address;
	uctx->nport_handle = p->nport_handle;
	uctx->ox_id = p->ox_id;
	qla_rport->uctx = uctx;
	INIT_LIST_HEAD(&uctx->elem);
	list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
	item->purls_context = (void *)uctx;

	ql_dbg(ql_dbg_unsol, vha, 0x2121,
	       "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
	       item->iocb.iocb[3], item->size, uctx->exchange_address,
	       fcport->d_id.b24);
	/* +48    0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	 * ----- -----------------------------------------------
	 * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
	 * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
	 * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
	 */
	ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
		       &item->iocb, item->size);

	qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
	return;

out:
	if (xmt_reject) {
		qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
		__qla_consume_iocb(vha, pkt, rsp);
	}
}