/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);
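
/*
 * Bind an fcport whose PRLI service parameters advertise an NVMe target
 * or discovery role to the FC-NVMe transport as a remote port.  The
 * transport's private pointer is set back to the fcport so later
 * callbacks can recover it.
 */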
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct nvme_rport *rport;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2101,
		    "%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	rport->req.port_name = wwn_to_u64(fcport->port_name);
	rport->req.node_name = wwn_to_u64(fcport->node_name);
	rport->req.port_role = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rport->req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, rport->req.node_name, rport->req.port_name,
	    rport->req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	fcport->nvme_remote_port->private = fcport;
	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	return 0;
}
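
/*
 * .create_queue callback: map a transport hardware-queue index onto one
 * of the driver's qla_qpair objects, creating a new qpair on first use.
 */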
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	ql_log(ql_log_warn, vha, 0xffff,
	    "allocating q for idx=%x w/o cpu mask\n", qidx);
	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}
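
/*
 * Deferred FCP I/O completion: invoke the transport's ->done() for the
 * request, then return the srb to its queue pair.
 */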
void qla_nvme_cmpl_io(struct srb_iocb *nvme)
{
	srb_t *sp;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;

	sp = container_of(nvme, srb_t, u.iocb_cmd);
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
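
/*
 * SRB completion callback for an NVMe FCP command.  Completions for
 * registered ports are queued on the qpair's nvme_done_list and later
 * finished by qla_nvme_cmpl_io(); otherwise the srb is released here.
 */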
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
		goto rel;

	if (unlikely(res == QLA_FUNCTION_FAILED))
		fd->status = NVME_SC_INTERNAL;
	else
		fd->status = 0;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);
	return;
rel:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
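
/* .ls_abort callback: abort an outstanding Link Service srb. */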
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	fc_port_t *fcport = rport->private;
	srb_t *sp = priv->sp;
	int rval;
	struct qla_hw_data *ha = fcport->vha->hw;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__,
	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, fcport, rval);
}
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}
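
/*
 * .ls_req callback: wrap the nvmefc_ls_req in an srb, DMA-map the
 * request payload and hand it to the firmware via qla2x00_start_sp().
 */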
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	fc_port_t *fcport = rport->private;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return rval;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}
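
/* .fcp_abort callback: abort the srb backing an outstanding FCP command. */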
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	srb_t *sp = priv->sp;
	int rval;
	fc_port_t *fcport = rport->private;
	struct qla_hw_data *ha = fcport->vha->hw;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x2127,
	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, fcport, rval);
}
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
	struct scsi_qla_host *vha = lport->private;
	unsigned long flags;
	struct qla_qpair *qpair = hw_queue_handle;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
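
/*
 * Build and ring a Command Type NVME IOCB for the request attached to
 * this srb: reserve an outstanding-command slot and ring space, fill in
 * the NVMe CMND/RSP IU descriptors, then chain the data scatterlist
 * through Continuation Type 1 IOCBs (five DSDs each) as needed.
 */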
static int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        index;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t        rval = QLA_SUCCESS;

	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -1;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -1;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&vha->hw->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CNMD IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}
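
/* .fcp_io callback: validate the request, allocate a qpair srb and queue it. */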
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv;

	if (!fd) {
		ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
		return rval;
	}

	priv = fd->private;
	fcport = rport->private;
	if (!fcport) {
		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
		return rval;
	}

	vha = fcport->vha;
	if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EIO;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return -EIO;
	}

	return rval;
}
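
/* Transport notification that deletion of the local port has completed. */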
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}
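
/*
 * Transport notification that a remote port delete has completed; drop
 * the driver's nvme_rport tracking entry and wake any waiter.
 */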
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct nvme_rport *r_port, *trport;

	fcport = rport->private;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(r_port, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (r_port->fcport == fcport) {
			list_del(&r_port->list);
			break;
		}
	}
	kfree(r_port);
	complete(&fcport->nvme_del_done);

	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}
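
/*
 * Template handed to nvme_fc_register_localport().  max_hw_queues and
 * dma_boundary are adjusted to the actual hardware limits in
 * qla_nvme_register_hba() before registration.
 */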
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.poll_queue	= qla_nvme_poll,
	.max_hw_queues	= 8,
	.max_sgl_segments = 128,
	.max_dif_sgl_segments = 64,
	.dma_boundary	= 0xFFFFFFFF,
	.local_priv_sz	= 8,
	.remote_priv_sz	= 0,
	.lsrqst_priv_sz	= sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}
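
/* Wait up to 2 seconds for the transport to run remoteport_delete. */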
static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
{
	int ret = QLA_SUCCESS;
	int timeout;

	timeout = wait_for_completion_timeout(&fcport->nvme_del_done,
	    msecs_to_jiffies(2000));
	if (!timeout) {
		ret = QLA_FUNCTION_FAILED;
		ql_log(ql_log_info, fcport->vha, 0x2111,
		    "timed out waiting for fcport=%p to delete\n", fcport);
	}

	return ret;
}
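
/* Abort an NVMe srb and wait for the abort to be reflected in its refcount. */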
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
{
	int rval;

	rval = ha->isp_ops->abort_command(sp);
	if (!rval && !qla_nvme_wait_on_command(sp))
		ql_log(ql_log_warn, NULL, 0x2112,
		    "nvme_wait_on_command timed out waiting on sp=%p\n", sp);
}
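
/* Worker for fcport->nvme_del_work: unregister a single remote port. */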
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct nvme_rport *rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			init_completion(&fcport->nvme_del_done);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
			qla_nvme_wait_on_rport_del(fcport);
		}
	}
}
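
/* Tear down all NVMe remote ports and the local port on host removal. */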
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	struct nvme_rport *rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
		fcport = rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		init_completion(&fcport->nvme_del_done);
		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
		qla_nvme_wait_on_rport_del(fcport);
	}

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret == 0)
			ql_log(ql_log_info, vha, 0x2116,
			    "unregistered localport=%p\n",
			    vha->nvme_local_port);
		else
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		wait_for_completion_timeout(&vha->nvme_del_done,
		    msecs_to_jiffies(5000));
	}
}
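
/* Register this host with the FC-NVMe transport as an initiator local port. */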
void qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
		return;
	}

	vha->nvme_local_port->private = vha;
}