/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u64			connection_id;
	atomic_t		csn;
	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	u32			flags;
	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[SG_CHUNK_SIZE];
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};
struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;

	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	u64			association_id;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;


/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being updated:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *     19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
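
/*
 * For illustration (the WWN values here are made-up examples, not taken
 * from this file): an lport with WWNN 0x20000090fa942693 and WWPN
 * 0x10000090fa942693 would yield a uevent entry of
 *     "NVMEFC_HOST_TRADDR=nn-0x20000090fa942693:pn-0x10000090fa942693"
 * which stays within the 64-character FCNVME_TRADDR_LENGTH bound above.
 */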
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
static void
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer.  Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects.  No need to do anything
		 * further.  Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association.  No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
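
/*
 * Illustrative effect of the wrappers below (a sketch, not code from this
 * file): with the fcloop pseudo-LLDD, dev == NULL, so
 *     dma = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);  -> returns 0
 *     fc_dma_mapping_error(NULL, dma);                         -> returns 0
 * i.e. the mapping is a no-op that never reports an error, while a real
 * LLDD with a valid struct device gets the normal dma_map_* behavior.
 */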
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
	if (ret)
		return ret;

	/*
	 * No timeout/not interruptible as we need the struct
	 * to exist until the lldd calls us back. Thus mandate
	 * wait until driver calls back. lldd responsible for
	 * the timeout action
	 */
	wait_for_completion(&lsop->ls_done);

	__nvme_fc_finish_ls_req(lsop);

	ret = lsop->ls_error;
	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}
/* *********************** NVME Ctrl Routines **************************** */

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */
	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	nvme_req(rq)->ctrl = &ctrl->ctrl;
	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = &op->sgl[0];
	return res;
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 1);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}
static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 1; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}
static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/* only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq)  for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}
/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ.  When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange.  The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}

	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
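	/*
	 * For example, with the spec values named above (type 0x5,
	 * subtype 0xA), the combined Type/Sub Type byte written below is
	 * 0x5A; the length mirrors data_len and the address stays 0, as
	 * the data moves on the FC exchange rather than via a host memory
	 * address.
	 */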
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
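/*
 * Note: queue 0 is the admin queue, so its tags come from the admin tag
 * set; io queues are numbered from 1 and are therefore offset by one when
 * indexing into the io tag set below.
 */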
static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}
static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req)
		return 0;

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
		 (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						 queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}
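/*
 * Note: AEN ops are handled outside the normal blk-mq request path: they
 * are submitted with no data (NVMEFC_FCP_NODATA, length 0) and the
 * FCOP_FLAGS_AEN checks in nvme_fc_start_fcp_op() skip the blk-mq
 * start/unmap steps for them.
 */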
static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}
static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests and invokes this
 * routine on each of them to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	__nvme_fc_abort_op(ctrl, op);
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.poll		= nvme_fc_poll,
	.timeout	= nvme_fc_timeout,
};
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}
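/*
 * Note: nvme_fc_create_io_queues() above is used for the first
 * association (the io tag set and connect_q are allocated there); on
 * later reconnects nvme_fc_recreate_io_queues() below reuses the existing
 * tag set and only re-creates and re-connects the hardware queues.
 */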
static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}
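/*
 * The helpers below keep activity counts so that teardown of the FC-side
 * objects can be deferred: a controller marks itself active on its rport,
 * the first active controller marks the rport active on its lport, and
 * the LLDD's remoteport_delete/localport_delete callbacks are only made
 * once the counts drain and the port object is already in the DELETED
 * state.
 */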
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}
/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	/* resume the io queues so that things will fast fail */
	nvme_start_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}
static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}
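/*
 * Called when an association attempt fails: while the remote port is
 * still online (or its dev_loss_end deadline has not yet passed) and the
 * reconnect policy allows it, another connect attempt is scheduled;
 * otherwise the controller is deleted.
 */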
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached.\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);
		return;
	}

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};
static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}
static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	ctrl->assoc_active = false;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	nvme_get_ctrl(&ctrl->ctrl);

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		nvme_put_ctrl(&ctrl->ctrl);
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/* as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does a rport put, we do an extra get here so
	 * that proper order/teardown happens.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}
struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}
/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
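/*
 * For example (the WWNs here are illustrative only), the two accepted
 * forms look like:
 *   nn-0x20000090fa942779:pn-0x10000090fa942779
 *   nn-20000090fa942779:pn-10000090fa942779
 */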
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}
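/*
 * Note: for this transport, "traddr" names the remote (target) FC port
 * and "host_traddr" names the local FC port; both are matched against
 * the registered lport/rport lists in nvme_fc_create_ctrl() above.
 */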
static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};
/* Arbitrary successive failures max. With lots of subsystems could be high */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * added to the list already will be skipped (as
				 * they are no longer list_empty). Loops should
				 * resume at rports that were not yet seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
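/*
 * Assuming the "fc" class and "fc_udev_device" device created in
 * nvme_fc_init_module() below, this attribute would typically surface as
 *   /sys/class/fc/fc_udev_device/nvme_discovery
 * and any write to it (e.g. "echo 1 > .../nvme_discovery") re-signals
 * discovery for every registered remote port.
 */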
static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	NULL
};

static struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};
static int __init nvme_fc_init_module(void)
{
	int ret;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be under this
	 * new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		return ret;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
	return ret;
}
static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
}
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");