// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
enum {
	NVMF_OPT_ERR		= -1,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};
struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
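/*
 * Example of an option string this parser accepts (all field values
 * are illustrative only):
 *
 *   wwnn=0x20000090fdd49ae0,wwpn=0x10000090fdd49ae0,roles=3,fcaddr=0x1
 */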
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
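/*
 * Initiator-side I/O state machine, as used by the handlers below:
 * INI_IO_START -> INI_IO_ACTIVE when the target side receives the op,
 * INI_IO_ACTIVE -> INI_IO_COMPLETED when the target completes it, and
 * START or ACTIVE -> INI_IO_ABORTED if the host aborts it first.
 */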
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};
static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
	}

	return 0;
}
static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}
static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		/* queue the request onto the port's list, not vice versa */
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
	}

	return 0;
}
static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

static int
fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
	struct fcloop_rport *rport = hosthandle;

	*wwnn = rport->lport->localport->node_name;
	*wwpn = rport->lport->localport->port_name;
	return 0;
}
/*
 * Simulate reception of an RSCN and convert it into an initiator
 * transport call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;
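/*
 * Encoding used by the set_cmd_drop attribute: any bit set above the
 * low byte selects the fabrics command set, and the low byte is the
 * opcode (or fctype) to match. For example (illustrative), 0x0101
 * matches the fabrics command with fctype 0x01, while 0x0001 matches
 * the plain opcode 0x01.
 */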
/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else {
		if (likely(!check_for_drop(tfcp_req)))
			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
		else
			pr_info("%s: dropped command ********\n", __func__);
	}
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}
/*
 * FCP IO operation done by target completion.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}
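/*
 * Note: the submit path above only allocates state and queues
 * fcp_rcv_work on nvmet_wq; the hand-off into the target transport
 * happens in fcloop_fcp_recv_work(), so the host-side submission
 * context never calls directly into the target stack.
 */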
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	/* skip the initiator-side scatterlist forward by "offset" bytes */
	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	/* copy "length" bytes, advancing both scatterlists in lockstep */
	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}
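/*
 * Reference lifecycle of a tfcp_req: one reference is taken at
 * kref_init() in fcloop_fcp_req() and released by
 * fcloop_call_host_done(); the abort path above takes a second
 * reference, dropped either in fcloop_fcp_abort_recv_work() (the
 * already-completed case) or via the host-done path once the abort
 * has been delivered to the target.
 */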
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}
#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.host_traddr		= fcloop_t2h_host_traddr,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	ret = -ENOMEM;
	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	if (!ret)
		wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	list_del(&nport->nport_list);

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}
static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
		drop_opcode, drop_amount);

	return count;
}
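/*
 * Example (illustrative): drop the 2nd and 3rd occurrences of the
 * fabrics fctype 0x01:
 *
 *   echo "0x101:2:2" > /sys/class/fcloop/ctl/set_cmd_drop
 *
 * i.e. "opcode:starting_instance:amount" as parsed by the sscanf above.
 */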
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	&dev_attr_set_cmd_drop.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static const struct class fcloop_class = {
	.name = "fcloop",
};
static struct device *fcloop_device;
static int __init fcloop_init(void)
{
	int ret;

	ret = class_register(&fcloop_class);
	if (ret) {
		pr_err("couldn't register class fcloop\n");
		return ret;
	}

	fcloop_device = device_create_with_groups(
				&fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_unregister(&fcloop_class);
	return ret;
}
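/*
 * Typical user-space usage once the module is loaded (WWN values are
 * illustrative only):
 *
 *   echo "wwnn=0x20000090fdd49ae0,wwpn=0x10000090fdd49ae0" > \
 *       /sys/class/fcloop/ctl/add_local_port
 *   echo "wwnn=0x20000090fdd49ae1,wwpn=0x10000090fdd49ae1" > \
 *       /sys/class/fcloop/ctl/add_target_port
 *   echo "wwnn=0x20000090fdd49ae1,wwpn=0x10000090fdd49ae1,lpwwnn=0x20000090fdd49ae0,lpwwpn=0x10000090fdd49ae0" > \
 *       /sys/class/fcloop/ctl/add_remote_port
 */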
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport = NULL;
	struct fcloop_nport *nport = NULL;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(&fcloop_class, MKDEV(0, 0));
	class_unregister(&fcloop_class);
}
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");