/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;
	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return QLA_FUNCTION_FAILED;
	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
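
	/*
	 * Build the remote port registration request for the FC-NVMe
	 * transport from the roles the target advertised in its PRLI
	 * service parameters.
	 */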
	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;
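
	/*
	 * Hand the port to the FC-NVMe transport. On success the transport
	 * allocates per-rport private space (sized by .remote_priv_sz in the
	 * port template below) which is used as struct qla_nvme_rport.
	 */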
	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name, req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}
	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;

	return 0;
}
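
/*
 * Hardware queue hooks called by the FC-NVMe transport. create_queue is
 * invoked once per hardware queue; index 0 is the admin queue.
 */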
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);
	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}
	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;
	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
}
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;
	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res == QLA_SUCCESS)
		fd->status = 0;
	else
		fd->status = NVME_SC_INTERNAL;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;

	qla2xxx_rel_qpair_sp(sp->qpair, sp);
	fd->done(fd);
}
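
/*
 * Abort handling. The transport's ls_abort/fcp_abort hooks only queue
 * abort_work; the actual abort of the command is issued from the
 * workqueue so it runs in process context.
 */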
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;
	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && (fcport && fcport->deleted))
		return;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;
	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;

	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
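
	/*
	 * Describe the LS exchange to the firmware; the request payload is
	 * DMA-mapped here for the duration of the command and the response
	 * landing buffer is passed through from the transport (fd->rspdma).
	 */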
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;
	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);
	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
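
	/*
	 * 'handle' indexes req->outstanding_cmds[]; the firmware echoes it
	 * back in the response IOCB so the completion path can locate the
	 * srb for this command.
	 */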
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}
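
	/*
	 * req->cnt is the driver's cached count of free request-queue
	 * entries; when it looks exhausted, re-read the firmware's
	 * out-pointer (from the shadow register when available) before
	 * giving up with -EBUSY.
	 */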
	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;
	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;
	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = CF_READ_DATA;
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = CF_WRITE_DATA;
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					CF_NVME_FIRST_BURST_ENABLE;
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}
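
	/*
	 * Address the exchange: firmware N_Port (loop) handle, 24-bit
	 * destination port ID, and the virtual-port index this I/O
	 * belongs to.
	 */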
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;
	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	vha = fcport->vha;
	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still sending
	 * IO's return busy back to stall the IO Q. This happens when the
	 * link goes away and fw hasn't notified us yet, but IO's are being
	 * returned. If the dev comes back quickly we won't exhaust the IO
	 * retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;
	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;

	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;
	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
	}

	return rval;
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}
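
/*
 * Operations and limits advertised to the FC-NVMe transport. The *_priv_sz
 * fields tell the transport how much per-object private space to allocate
 * on the driver's behalf (e.g. struct qla_nvme_rport for each remote port).
 */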
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete	= qla_nvme_localport_delete,
	.remoteport_delete	= qla_nvme_remoteport_delete,
	.create_queue		= qla_nvme_alloc_queue,
	.delete_queue		= NULL,
	.ls_req			= qla_nvme_ls_req,
	.ls_abort		= qla_nvme_ls_abort,
	.fcp_io			= qla_nvme_post_cmd,
	.fcp_abort		= qla_nvme_fcp_abort,
	.max_hw_queues		= 8,
	.max_sgl_segments	= 128,
	.max_dif_sgl_segments	= 64,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= 8,
	.remote_priv_sz		= sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz		= sizeof(struct nvme_private),
	.fcprqst_priv_sz	= sizeof(struct nvme_private),
};
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}
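
/*
 * Driver-initiated abort of an outstanding NVMe srb, used by the qla2xxx
 * teardown paths; it is not one of the transport template callbacks above.
 */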
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
	int rval;

	if (ha->flags.fw_started) {
		rval = ha->isp_ops->abort_command(sp);
		if (!rval && !qla_nvme_wait_on_command(sp))
			ql_log(ql_log_warn, NULL, 0x2112,
			    "timed out waiting on sp=%p\n", sp);
	} else {
		sp->done(sp, res);
	}
}
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);
	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			nvme_fc_set_remoteport_devloss
				(fcport->nvme_remote_port, 0);
			init_completion(&fcport->nvme_del_done);
			if (nvme_fc_unregister_remoteport
			    (fcport->nvme_remote_port))
				ql_log(ql_log_info, fcport->vha, 0x2114,
				    "%s: Failed to unregister nvme_remote_port\n",
				    __func__);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}
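
/*
 * Register this host as an FC-NVMe local port. The number of NVMe hardware
 * queues is capped at ha->max_req_queues - 2, which is why the WARN_ON
 * below expects at least three request queues.
 */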
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));
	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}