// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};
struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
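
/*
 * Strings written to the fcloop sysfs attributes are parsed against the
 * table above: comma-separated token=value pairs. A string might look
 * roughly like (illustrative values only):
 *
 *	wwnn=0x10000090fa945aaa,wwpn=0x10000090fa945bbb,roles=3,fcaddr=0x10100
 *
 * wwnn/wwpn/lpwwnn/lpwwpn are parsed as u64, roles as a decimal int,
 * and fcaddr as hex.
 */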
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
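
/*
 * Note: *nname and *pname are primed with -1 as a sentinel above, so a
 * delete string must supply both wwnn= and wwpn= or the parse fails
 * with -EINVAL.
 */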
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
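
/*
 * A rough sketch of the inistate transitions driven by the work items
 * below: START -> ACTIVE once fcp_rcv_work hands the command to the
 * target; START/ACTIVE -> ABORTED when the host aborts the I/O; and
 * any state -> COMPLETED when the target completion path runs
 * (tio_done_work).
 */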
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
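
/*
 * The target transport only hands back the nvmefc_tgt_*_req structs it
 * was given; since those are embedded in the fcloop wrappers above, a
 * container_of() is all that is needed to recover the wrapper.
 */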
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&rport->ls_list, &tls_req->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));

	tgt_lsreq->done(tgt_lsreq);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&rport->ls_list, &tls_req->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}
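
/*
 * LS round trip, roughly: the host side queues an LS via fcloop_ls_req(),
 * which feeds it straight into the target with nvmet_fc_rcv_ls_req().
 * When the target answers through fcloop_xmt_ls_rsp() above, the response
 * is copied back and the host's done() callback is invoked from
 * fcloop_rport_lsrqst_work() so completion runs in work-queue context.
 */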
/*
 * Simulate reception of RSCN and converting it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}
static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}
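
/*
 * Reference lifecycle, roughly: kref_init() in fcloop_fcp_req() creates
 * the original io reference, dropped by fcloop_call_host_done(). The
 * host abort path takes an extra reference so the tfcp_req stays valid
 * while abort_rcv_work is queued; that reference is dropped either in
 * the abort work itself or by the done path it triggers.
 */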
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}
/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
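
/*
 * fcloop_fcp_copy_data() below walks two scatterlists in lock step:
 * "io_sg" is the host-side buffer (fcpreq->first_sgl) and "data_sg" is
 * the target-side buffer (tgt_fcpreq->sg). It first skips "offset"
 * bytes of the host list, then copies "length" bytes in chunks bounded
 * by whichever current element is smaller, advancing each list
 * independently as its element is exhausted.
 */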
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}
#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
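
/*
 * The two templates above are what make the loopback work: the host
 * side's ls_req/fcp_io callbacks hand requests directly to the nvmet-fc
 * target code, and the target side's xmt_ls_rsp/fcp_op callbacks hand
 * the results back, so a host and target on the same machine emulate
 * an FC link with no HBA involved.
 */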
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
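
/*
 * Unregistration handshake: nvme_fc_unregister_localport() tears the
 * port down asynchronously; the transport calls back into
 * fcloop_localport_delete(), which fires lport->unreg_done and releases
 * the wait_for_completion() above before the lport is freed.
 */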
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
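
/*
 * fcloop_alloc_nport(), roughly: it either returns a brand-new nport,
 * or, if an nport with the same wwnn/wwpn already exists with the
 * "other half" unset (no rport when creating a remote port, no tport
 * when creating a target port), takes a reference on the existing one
 * and updates it. NULL means the options were invalid or clashed with
 * an existing local port.
 */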
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
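
/*
 * These attributes surface under /sys/class/fcloop/ctl/. A typical
 * setup sequence might look roughly like this (WWN values are
 * arbitrary examples; a roles= value may also be supplied):
 *
 *	echo "wwnn=0x10000090fa945aaa,wwpn=0x10000090fa945bbb" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x10000090fa945ccc,wwpn=0x10000090fa945ddd" > \
 *		/sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x10000090fa945ccc,wwpn=0x10000090fa945ddd,\
 *	lpwwnn=0x10000090fa945aaa,lpwwpn=0x10000090fa945bbb" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 */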
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};
static struct class *fcloop_class;
static struct device *fcloop_device;
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");