// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

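/*
 * Bind an FC remote port to the FC-NVMe transport once its PRLI service
 * parameters indicate NVMe target or discovery support.
 */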
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name, req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

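/*
 * Final kref release for an LS request srb: detach it from the transport
 * request, invoke the LS done callback, and free the srb.
 */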
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

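/* Work item: drop the LS srb reference outside of interrupt context. */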
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

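/*
 * Work item that issues the firmware abort for an srb; scheduled from
 * the transport's ls_abort and fcp_abort entry points.
 */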
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

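/* Transport entry point: issue an NVMe Link Service request. */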
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

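/* Transport entry point: abort an outstanding FCP command. */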
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

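/*
 * Build and queue a Command Type NVME IOCB for @sp on its queue pair.
 * The command IOCB holds one inline DSD and each Continuation Type 1
 * IOCB holds five more, so the ring space reserved works out to roughly
 * 1 + DIV_ROUND_UP(tot_dsds - 1, 5) entries (see qla24xx_calc_iocbs()).
 */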
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
				    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

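/* Transport entry point: queue an NVMe FCP command to the adapter. */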
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return rval;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return rval;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO Q. This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned. If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

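/* Transport callback: the local port has been fully deleted. */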
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

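/* Transport callback: the remote port has been fully deleted. */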
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

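/*
 * FC-NVMe transport template. max_hw_queues is trimmed at registration
 * time to match the number of request queues the adapter exposes.
 */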
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req         = qla_nvme_ls_req,
	.ls_abort       = qla_nvme_ls_abort,
	.fcp_io         = qla_nvme_post_cmd,
	.fcp_abort      = qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

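/* Unregister a remote port from the transport and wait for completion. */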
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

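/* Tear down the FC-NVMe local port when the host is being deleted. */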
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

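/*
 * Register this host as an FC-NVMe local port. At least three request
 * queues are required: two are reserved for the driver and the rest
 * back the transport's hardware queues.
 */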
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	if (ha->max_req_queues < 3) {
		if (!ha->flags.max_req_queue_warned)
			ql_log(ql_log_info, vha, 0x2120,
			    "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
			    __func__, ha->max_req_queues);
		ha->flags.max_req_queue_warned = 1;
		return ret;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}