// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
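
/*
 * Example option strings accepted by fcloop_parse_options() below
 * (WWN values illustrative only):
 *
 *   "wwnn=0x10000090fdd61452,wwpn=0x201700a09890f5bf,roles=3,fcaddr=0x0"
 *
 * wwnn/wwpn/lpwwnn/lpwwpn are parsed as u64, roles as a decimal int and
 * fcaddr as hex, matching the match_table_t formats above.
 */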

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
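
/*
 * The del_local_port/del_remote_port/del_target_port attributes use
 * this parser, so only a "wwnn=<u64>,wwpn=<u64>" pair is meaningful
 * there; any other token falls into the default case and returns
 * -EINVAL.
 */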

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
};

struct fcloop_nport {
	struct fcloop_rport	*rport;
	struct fcloop_tport	*tport;
	struct fcloop_lport	*lport;
	struct list_head	nport_list;
	struct kref		ref;
	u64			node_name;
	u64			port_name;
	u32			port_role;
	u32			port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
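
/*
 * Initiator-side I/O state, driven by the work handlers below:
 *
 *   INI_IO_START --> INI_IO_ACTIVE --> INI_IO_COMPLETED
 *        \               /
 *         +-> INI_IO_ABORTED
 *
 * fcloop_fcp_abort() moves START/ACTIVE to ABORTED;
 * fcloop_tgt_fcprqst_done_work() moves any state to COMPLETED.
 */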

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
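
/*
 * LS loopback path: the request buffer above is handed straight to the
 * target via nvmet_fc_rcv_ls_req(); the response comes back through
 * fcloop_xmt_ls_rsp() below, which copies the RSP buffer over and
 * completes the initiator from the work item.
 */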

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * Simulate reception of an RSCN, converting it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
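
/*
 * fcloop_fcpreq reference counting: kref_init() in fcloop_fcp_req()
 * holds the original I/O reference and fcloop_fcp_abort() takes one
 * more for the abort path; each fcloop_call_host_done() invocation
 * drops exactly one, so the request is freed only after every path has
 * finished with it.
 */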

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
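
/*
 * GFP_ATOMIC above, presumably because ->fcp_io may be called from a
 * context that cannot sleep; actual command delivery is deferred to
 * fcp_rcv_work so the target sees it from process context.
 */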

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
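
/*
 * The copy walks the target-side and host-side scatterlists in
 * lockstep, advancing whichever element is exhausted first, so the two
 * lists may have entirely different element geometries.
 */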

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}
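
/*
 * Abort summary: an I/O still in START/ACTIVE is marked ABORTED and
 * abort_rcv_work pushes the abort into the target transport; an I/O
 * that already COMPLETED has nothing to abort, so only the reference
 * taken above is dropped.
 */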

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.module			= THIS_MODULE,
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
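
/*
 * Typical userspace setup, for illustration only (WWN values are made
 * up, and the module name assumes the usual nvme-fcloop build target).
 * fcloop_init() below creates the files under /sys/class/fcloop/ctl/:
 *
 *   modprobe nvme-fcloop
 *   echo "wwnn=0x10000090fdd61452,wwpn=0x201700a09890f5bf" \
 *       > /sys/class/fcloop/ctl/add_local_port
 *   echo "wwnn=0x10001090fdd61452,wwpn=0x201800a09890f5bf,lpwwnn=0x10000090fdd61452,lpwwpn=0x201700a09890f5bf" \
 *       > /sys/class/fcloop/ctl/add_remote_port
 *   echo "wwnn=0x10001090fdd61452,wwpn=0x201800a09890f5bf" \
 *       > /sys/class/fcloop/ctl/add_target_port
 */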

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
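
/*
 * __wait_localport_unreg() blocks on lport->unreg_done, which is
 * completed from fcloop_localport_delete() once the nvme-fc transport
 * has torn the local port down, so a successful write to
 * del_local_port returns only after unregistration has finished.
 */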

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");