/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
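
/*
 * fcloop: a software loopback "fabric" for NVMe over Fibre Channel.
 * The module registers as an LLDD with both the host-side (nvme-fc)
 * and target-side (nvmet-fc) transports and shuttles LS and FCP
 * traffic between the two in memory, so the FC transports can be
 * exercised without Fibre Channel hardware.  Ports are created and
 * deleted through sysfs attributes on the /sys/class/fcloop/ctl
 * device defined at the bottom of this file.
 */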
enum {
        NVMF_OPT_ERR            = 0,
        NVMF_OPT_WWNN           = 1 << 0,
        NVMF_OPT_WWPN           = 1 << 1,
        NVMF_OPT_ROLES          = 1 << 2,
        NVMF_OPT_FCADDR         = 1 << 3,
        NVMF_OPT_LPWWNN         = 1 << 4,
        NVMF_OPT_LPWWPN         = 1 << 5,
};
struct fcloop_ctrl_options {
        int                     mask;
        u64                     wwnn;
        u64                     wwpn;
        u32                     roles;
        u32                     fcaddr;
        u64                     lpwwnn;
        u64                     lpwwpn;
};
static const match_table_t opt_tokens = {
        { NVMF_OPT_WWNN,        "wwnn=%s"       },
        { NVMF_OPT_WWPN,        "wwpn=%s"       },
        { NVMF_OPT_ROLES,       "roles=%d"      },
        { NVMF_OPT_FCADDR,      "fcaddr=%x"     },
        { NVMF_OPT_LPWWNN,      "lpwwnn=%s"     },
        { NVMF_OPT_LPWWPN,      "lpwwpn=%s"     },
        { NVMF_OPT_ERR,         NULL            }
};
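
/*
 * An options string as parsed below is a comma-separated list, e.g.
 * (illustrative values):
 *
 *      wwnn=0x10000090fdd88a51,wwpn=0x10000090fdd88a52
 */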
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                opts->mask |= token;
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwnn = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwpn = token64;
                        break;
                case NVMF_OPT_ROLES:
                        if (match_int(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->roles = token;
                        break;
                case NVMF_OPT_FCADDR:
                        if (match_hex(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->fcaddr = token;
                        break;
                case NVMF_OPT_LPWWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwnn = token64;
                        break;
                case NVMF_OPT_LPWWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwpn = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);
        return ret;
}
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        *nname = -1;
        *pname = -1;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *nname = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *pname = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);

        if (!ret) {
                if (*nname == -1)
                        return -EINVAL;
                if (*pname == -1)
                        return -EINVAL;
        }

        return ret;
}
#define LPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
                         NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS    (NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
        struct nvme_fc_local_port *localport;
        struct list_head lport_list;
        struct completion unreg_done;
};

struct fcloop_lport_priv {
        struct fcloop_lport *lport;
};

struct fcloop_rport {
        struct nvme_fc_remote_port *remoteport;
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_tport {
        struct nvmet_fc_target_port *targetport;
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_nport {
        struct fcloop_rport *rport;
        struct fcloop_tport *tport;
        struct fcloop_lport *lport;
        struct list_head nport_list;
        struct kref ref;
        u64 node_name;
        u64 port_name;
        u32 port_role;
        u32 port_id;
};

struct fcloop_lsreq {
        struct fcloop_tport             *tport;
        struct nvmefc_ls_req            *lsreq;
        struct work_struct              work;
        struct nvmefc_tgt_ls_req        tgt_ls_req;
        int                             status;
};

enum {
        INI_IO_START            = 0,
        INI_IO_ACTIVE           = 1,
        INI_IO_ABORTED          = 2,
        INI_IO_COMPLETED        = 3,
};

struct fcloop_fcpreq {
        struct fcloop_tport             *tport;
        struct nvmefc_fcp_req           *fcpreq;
        spinlock_t                      reqlock;
        int                             status;
        u32                             inistate;
        bool                            active;
        bool                            aborted;
        struct kref                     ref;
        struct work_struct              fcp_rcv_work;
        struct work_struct              abort_rcv_work;
        struct work_struct              tio_done_work;
        struct nvmefc_tgt_fcp_req       tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req   *fcpreq;
        struct fcloop_fcpreq    *tfcp_req;
        spinlock_t              inilock;
};
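
/*
 * Lifetime note: each fcloop_fcpreq is kref-counted.  The reference
 * taken at allocation in fcloop_fcp_req() belongs to the normal I/O
 * path and is dropped in fcloop_call_host_done(); fcloop_fcp_abort()
 * takes an extra reference while its abort work is outstanding.  The
 * inistate field tracks the initiator-side view of the request:
 * INI_IO_START -> INI_IO_ACTIVE -> INI_IO_COMPLETED, with
 * INI_IO_ABORTED reachable from START or ACTIVE.
 */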
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
                        unsigned int qidx, u16 qsize,
                        void **handle)
{
        *handle = localport;
        return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
                        unsigned int idx, void *handle)
{
}
/*
 * Transmit of LS RSP done (e.g. buffers all set).  Call back up the
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
        struct fcloop_lsreq *tls_req =
                container_of(work, struct fcloop_lsreq, work);
        struct fcloop_tport *tport = tls_req->tport;
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        if (!tport || tport->remoteport)
                lsreq->done(lsreq, tls_req->status);
}
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
        struct fcloop_lsreq *tls_req = lsreq->private;
        struct fcloop_rport *rport = remoteport->private;
        int ret = 0;

        tls_req->lsreq = lsreq;
        INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

        if (!rport->targetport) {
                tls_req->status = -ECONNREFUSED;
                tls_req->tport = NULL;
                schedule_work(&tls_req->work);
                return ret;
        }

        tls_req->status = 0;
        tls_req->tport = rport->targetport->private;
        ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
                                 lsreq->rqstaddr, lsreq->rqstlen);

        return ret;
}
static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
                        struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
                ((lsreq->rsplen < tgt_lsreq->rsplen) ?
                                lsreq->rsplen : tgt_lsreq->rsplen));
        tgt_lsreq->done(tgt_lsreq);

        schedule_work(&tls_req->work);

        return 0;
}
static void
fcloop_tfcp_req_free(struct kref *ref)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(ref, struct fcloop_fcpreq, ref);

        kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
        kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
        return kref_get_unless_zero(&tfcp_req->ref);
}
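
/*
 * kref_get_unless_zero() (rather than a plain kref_get()) lets a path
 * racing with the final put treat a request whose refcount has already
 * hit zero as simply gone instead of resurrecting it.
 */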
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
                        struct fcloop_fcpreq *tfcp_req, int status)
{
        struct fcloop_ini_fcpreq *inireq = NULL;

        if (fcpreq) {
                inireq = fcpreq->private;
                spin_lock(&inireq->inilock);
                inireq->tfcp_req = NULL;
                spin_unlock(&inireq->inilock);

                fcpreq->status = status;
                fcpreq->done(fcpreq);
        }

        /* release original io reference on tgt struct */
        fcloop_tfcp_req_put(tfcp_req);
}
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
        int ret = 0;
        bool aborted = false;

        spin_lock(&tfcp_req->reqlock);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
                tfcp_req->inistate = INI_IO_ACTIVE;
                break;
        case INI_IO_ABORTED:
                aborted = true;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(aborted))
                ret = -ECANCELED;
        else
                ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
                                &tfcp_req->tgt_fcp_req,
                                fcpreq->cmdaddr, fcpreq->cmdlen);
        if (ret)
                fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, abort_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        bool completed = false;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        switch (tfcp_req->inistate) {
        case INI_IO_ABORTED:
                break;
        case INI_IO_COMPLETED:
                completed = true;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(completed)) {
                /* remove reference taken in original abort downcall */
                fcloop_tfcp_req_put(tfcp_req);
                return;
        }

        if (tfcp_req->tport->targetport)
                nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                                        &tfcp_req->tgt_fcp_req);

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->fcpreq = NULL;
        spin_unlock(&tfcp_req->reqlock);

        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
        /* call_host_done releases reference for abort downcall */
}
/*
 * FCP IO operation done by target completion.  Call back up the
 * initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, tio_done_work);
        struct nvmefc_fcp_req *fcpreq;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        tfcp_req->inistate = INI_IO_COMPLETED;
        spin_unlock(&tfcp_req->reqlock);

        fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;

        if (!rport->targetport)
                return -ECONNREFUSED;

        tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
        if (!tfcp_req)
                return -ENOMEM;

        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;
        spin_lock_init(&inireq->inilock);

        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
        tfcp_req->inistate = INI_IO_START;
        spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
        INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        kref_init(&tfcp_req->ref);

        schedule_work(&tfcp_req->fcp_rcv_work);

        return 0;
}
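
/*
 * The command is handed to the target transport from fcp_rcv_work
 * rather than inline above, so the target side runs in its own
 * context (as it would with a real LLDD) and an abort that arrives
 * first can flip inistate to INI_IO_ABORTED before delivery.
 */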
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
                        struct scatterlist *io_sg, u32 offset, u32 length)
{
        void *data_p, *io_p;
        u32 data_len, io_len, tlen;

        io_p = sg_virt(io_sg);
        io_len = io_sg->length;

        for ( ; offset; ) {
                tlen = min_t(u32, offset, io_len);
                offset -= tlen;
                io_len -= tlen;
                if (!io_len) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;
        }

        data_p = sg_virt(data_sg);
        data_len = data_sg->length;

        for ( ; length; ) {
                tlen = min_t(u32, io_len, data_len);
                tlen = min_t(u32, tlen, length);

                if (op == NVMET_FCOP_WRITEDATA)
                        memcpy(data_p, io_p, tlen);
                else
                        memcpy(io_p, data_p, tlen);

                length -= tlen;

                io_len -= tlen;
                if ((!io_len) && (length)) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;

                data_len -= tlen;
                if ((!data_len) && (length)) {
                        data_sg = sg_next(data_sg);
                        data_p = sg_virt(data_sg);
                        data_len = data_sg->length;
                } else
                        data_p += tlen;
        }
}
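
/*
 * fcloop_fcp_copy_data() walks both scatterlists with sg_virt(), i.e.
 * it assumes the pages are kernel-mapped; nothing is DMA-mapped here
 * since the "wire" is just a memcpy.
 */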
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        struct nvmefc_fcp_req *fcpreq;
        u32 rsplen = 0, xfrlen = 0;
        int fcp_err = 0, active, aborted;
        u8 op = tgt_fcpreq->op;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        active = tfcp_req->active;
        aborted = tfcp_req->aborted;
        tfcp_req->active = true;
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(active))
                /* illegal - call while i/o active */
                return -EALREADY;

        if (unlikely(aborted)) {
                /* target transport has aborted i/o prior */
                spin_lock(&tfcp_req->reqlock);
                tfcp_req->active = false;
                spin_unlock(&tfcp_req->reqlock);
                tgt_fcpreq->transferred_length = 0;
                tgt_fcpreq->fcp_error = -ECANCELED;
                tgt_fcpreq->done(tgt_fcpreq);
                return 0;
        }

        /*
         * if fcpreq is NULL, the I/O has been aborted (from
         * initiator side). For the target side, act as if all is well
         * but don't actually move data.
         */

        switch (op) {
        case NVMET_FCOP_WRITEDATA:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                break;

        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                if (op == NVMET_FCOP_READDATA)
                        break;

                /* Fall-Thru to RSP handling */

        case NVMET_FCOP_RSP:
                if (fcpreq) {
                        rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
                                        fcpreq->rsplen : tgt_fcpreq->rsplen);
                        memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
                        if (rsplen < tgt_fcpreq->rsplen)
                                fcp_err = -E2BIG;
                        fcpreq->rcv_rsplen = rsplen;
                        fcpreq->status = 0;
                }
                tfcp_req->status = 0;
                break;

        default:
                fcp_err = -EINVAL;
                break;
        }

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->active = false;
        spin_unlock(&tfcp_req->reqlock);

        tgt_fcpreq->transferred_length = xfrlen;
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);

        return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        /*
         * mark aborted only in case there were 2 threads in transport
         * (one doing io, other doing abort) and only kills ops posted
         * after the abort request
         */
        spin_lock(&tfcp_req->reqlock);
        tfcp_req->aborted = true;
        spin_unlock(&tfcp_req->reqlock);

        tfcp_req->status = NVME_SC_INTERNAL;

        /*
         * nothing more to do. If io wasn't active, the transport should
         * immediately call the req_release. If it was active, the op
         * will complete, and the lldd should call req_release.
         */
}
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        schedule_work(&tfcp_req->tio_done_work);
}
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
}
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;
        bool abortio = true;

        spin_lock(&inireq->inilock);
        tfcp_req = inireq->tfcp_req;
        if (tfcp_req)
                fcloop_tfcp_req_get(tfcp_req);
        spin_unlock(&inireq->inilock);

        if (!tfcp_req)
                /* abort has already been called */
                return;

        /* break initiator/target relationship for io */
        spin_lock(&tfcp_req->reqlock);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
        case INI_IO_ACTIVE:
                tfcp_req->inistate = INI_IO_ABORTED;
                break;
        case INI_IO_COMPLETED:
                abortio = false;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (abortio)
                /* leave the reference while the work item is scheduled */
                WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
        else {
                /*
                 * as the io has already had the done callback made,
                 * nothing more to do. So release the reference taken above
                 */
                fcloop_tfcp_req_put(tfcp_req);
        }
}
static void
fcloop_nport_free(struct kref *ref)
{
        struct fcloop_nport *nport =
                container_of(ref, struct fcloop_nport, ref);
        unsigned long flags;

        spin_lock_irqsave(&fcloop_lock, flags);
        list_del(&nport->nport_list);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
        kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
        return kref_get_unless_zero(&nport->ref);
}
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
        struct fcloop_lport_priv *lport_priv = localport->private;
        struct fcloop_lport *lport = lport_priv->lport;

        /* release any threads waiting for the unreg to complete */
        complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct fcloop_rport *rport = remoteport->private;

        fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct fcloop_tport *tport = targetport->private;

        fcloop_nport_put(tport->nport);
}
#define FCLOOP_HW_QUEUES        4
#define FCLOOP_SGL_SEGS         256
#define FCLOOP_DMABOUND_4G      0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
        .localport_delete       = fcloop_localport_delete,
        .remoteport_delete      = fcloop_remoteport_delete,
        .create_queue           = fcloop_create_queue,
        .delete_queue           = fcloop_delete_queue,
        .ls_req                 = fcloop_ls_req,
        .fcp_io                 = fcloop_fcp_req,
        .ls_abort               = fcloop_ls_abort,
        .fcp_abort              = fcloop_fcp_abort,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
        .dma_boundary           = FCLOOP_DMABOUND_4G,
        /* sizes of additional private data for data structures */
        .local_priv_sz          = sizeof(struct fcloop_lport_priv),
        .remote_priv_sz         = sizeof(struct fcloop_rport),
        .lsrqst_priv_sz         = sizeof(struct fcloop_lsreq),
        .fcprqst_priv_sz        = sizeof(struct fcloop_ini_fcpreq),
};
static struct nvmet_fc_target_template tgttemplate = {
        .targetport_delete      = fcloop_targetport_delete,
        .xmt_ls_rsp             = fcloop_xmt_ls_rsp,
        .fcp_op                 = fcloop_fcp_op,
        .fcp_abort              = fcloop_tgt_fcp_abort,
        .fcp_req_release        = fcloop_fcp_req_release,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
        .dma_boundary           = FCLOOP_DMABOUND_4G,
        /* optional features */
        .target_features        = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz         = sizeof(struct fcloop_tport),
};
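
/*
 * The *_priv_sz template fields above are what back the ->private
 * pointers used throughout this file: the nvme-fc and nvmet-fc
 * transports allocate that much extra space alongside each lsreq,
 * fcp request, and port object, and fcloop keeps its per-object
 * state there.
 */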
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_port_info pinfo;
        struct fcloop_ctrl_options *opts;
        struct nvme_fc_local_port *localport;
        struct fcloop_lport *lport;
        struct fcloop_lport_priv *lport_priv;
        unsigned long flags;
        int ret = -ENOMEM;

        lport = kzalloc(sizeof(*lport), GFP_KERNEL);
        if (!lport)
                return -ENOMEM;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                goto out_free_lport;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = opts->wwnn;
        pinfo.port_name = opts->wwpn;
        pinfo.port_role = opts->roles;
        pinfo.port_id = opts->fcaddr;

        ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
        if (!ret) {
                /* success */
                lport_priv = localport->private;
                lport_priv->lport = lport;

                lport->localport = localport;
                INIT_LIST_HEAD(&lport->lport_list);

                spin_lock_irqsave(&fcloop_lock, flags);
                list_add_tail(&lport->lport_list, &fcloop_lports);
                spin_unlock_irqrestore(&fcloop_lock, flags);
        }

out_free_opts:
        kfree(opts);
out_free_lport:
        /* free only if we're going to fail */
        if (ret)
                kfree(lport);

        return ret ? ret : count;
}
static void
__unlink_local_port(struct fcloop_lport *lport)
{
        list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
        int ret;

        init_completion(&lport->unreg_done);

        ret = nvme_fc_unregister_localport(lport->localport);

        wait_for_completion(&lport->unreg_done);

        kfree(lport);

        return ret;
}
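
/*
 * nvme_fc_unregister_localport() completes asynchronously: the
 * transport calls fcloop_localport_delete() once the last reference
 * on the localport is gone, which fires unreg_done and releases the
 * waiter above.
 */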
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_lport *tlport, *lport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tlport, &fcloop_lports, lport_list) {
                if (tlport->localport->node_name == nodename &&
                    tlport->localport->port_name == portname) {
                        lport = tlport;
                        __unlink_local_port(lport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!lport)
                return -ENOENT;

        ret = __wait_localport_unreg(lport);

        return ret ? ret : count;
}
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
        struct fcloop_nport *newnport, *nport = NULL;
        struct fcloop_lport *tmplport, *lport = NULL;
        struct fcloop_ctrl_options *opts;
        unsigned long flags;
        u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
        int ret;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                return NULL;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & opts_mask) != opts_mask) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
        if (!newnport)
                goto out_free_opts;

        INIT_LIST_HEAD(&newnport->nport_list);
        newnport->node_name = opts->wwnn;
        newnport->port_name = opts->wwpn;
        if (opts->mask & NVMF_OPT_ROLES)
                newnport->port_role = opts->roles;
        if (opts->mask & NVMF_OPT_FCADDR)
                newnport->port_id = opts->fcaddr;
        kref_init(&newnport->ref);

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
                if (tmplport->localport->node_name == opts->wwnn &&
                    tmplport->localport->port_name == opts->wwpn)
                        goto out_invalid_opts;

                if (tmplport->localport->node_name == opts->lpwwnn &&
                    tmplport->localport->port_name == opts->lpwwpn)
                        lport = tmplport;
        }

        if (remoteport) {
                if (!lport)
                        goto out_invalid_opts;
                newnport->lport = lport;
        }

        list_for_each_entry(nport, &fcloop_nports, nport_list) {
                if (nport->node_name == opts->wwnn &&
                    nport->port_name == opts->wwpn) {
                        if ((remoteport && nport->rport) ||
                            (!remoteport && nport->tport)) {
                                nport = NULL;
                                goto out_invalid_opts;
                        }

                        fcloop_nport_get(nport);

                        spin_unlock_irqrestore(&fcloop_lock, flags);

                        if (remoteport)
                                nport->lport = lport;
                        if (opts->mask & NVMF_OPT_ROLES)
                                nport->port_role = opts->roles;
                        if (opts->mask & NVMF_OPT_FCADDR)
                                nport->port_id = opts->fcaddr;
                        goto out_free_newnport;
                }
        }

        list_add_tail(&newnport->nport_list, &fcloop_nports);

        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(opts);
        return newnport;

out_invalid_opts:
        spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
        kfree(newnport);
out_free_opts:
        kfree(opts);
        return nport;
}
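
/*
 * An fcloop_nport, keyed by wwnn/wwpn, represents both ends of one
 * loopback link: fcloop_alloc_nport() either allocates a fresh nport
 * or takes a reference on an existing one, so a remote port and a
 * target port created with the same names share a single nport.
 */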
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_rport *rport;
        struct nvme_fc_port_info pinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, true);
        if (!nport)
                return -EIO;

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = nport->node_name;
        pinfo.port_name = nport->port_name;
        pinfo.port_role = nport->port_role;
        pinfo.port_id = nport->port_id;

        ret = nvme_fc_register_remoteport(nport->lport->localport,
                                                &pinfo, &remoteport);
        if (ret || !remoteport) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        rport = remoteport->private;
        rport->remoteport = remoteport;
        rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
        if (nport->tport) {
                nport->tport->remoteport = remoteport;
                nport->tport->lport = nport->lport;
        }
        rport->nport = nport;
        rport->lport = nport->lport;
        nport->rport = rport;

        return count;
}
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
        struct fcloop_rport *rport = nport->rport;

        if (rport && nport->tport)
                nport->tport->remoteport = NULL;
        nport->rport = NULL;

        return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
        if (!rport)
                return -EALREADY;

        return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_rport *rport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->rport) {
                        nport = tmpport;
                        rport = __unlink_remote_port(nport);
                        break;
                }
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __remoteport_unreg(nport, rport);

        return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct nvmet_fc_port_info tinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, false);
        if (!nport)
                return -EIO;

        tinfo.node_name = nport->node_name;
        tinfo.port_name = nport->port_name;
        tinfo.port_id = nport->port_id;

        ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
                                                &targetport);
        if (ret) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        tport = targetport->private;
        tport->targetport = targetport;
        tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
        if (nport->rport)
                nport->rport->targetport = targetport;
        tport->nport = nport;
        tport->lport = nport->lport;
        nport->tport = tport;

        return count;
}
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
        struct fcloop_tport *tport = nport->tport;

        if (tport && nport->rport)
                nport->rport->targetport = NULL;
        nport->tport = NULL;

        return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
        if (!tport)
                return -EALREADY;

        return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_tport *tport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->tport) {
                        nport = tmpport;
                        tport = __unlink_target_port(nport);
                        break;
                }
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __targetport_unreg(nport, tport);

        return ret ? ret : count;
}
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
        &dev_attr_add_local_port.attr,
        &dev_attr_del_local_port.attr,
        &dev_attr_add_remote_port.attr,
        &dev_attr_del_remote_port.attr,
        &dev_attr_add_target_port.attr,
        &dev_attr_del_target_port.attr,
        NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
        .attrs          = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
        &fcloop_dev_attrs_group,
        NULL
};
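
/*
 * Example of wiring up a loopback link from a shell (illustrative
 * WWN values; roles= and fcaddr= may be appended as needed):
 *
 *      echo "wwnn=0x10000090fdd88a51,wwpn=0x10000090fdd88a52" > \
 *              /sys/class/fcloop/ctl/add_local_port
 *      echo "wwnn=0x10000090fdd88a53,wwpn=0x10000090fdd88a54" > \
 *              /sys/class/fcloop/ctl/add_target_port
 *      echo "wwnn=0x10000090fdd88a53,wwpn=0x10000090fdd88a54,"\
 *      "lpwwnn=0x10000090fdd88a51,lpwwpn=0x10000090fdd88a52" > \
 *              /sys/class/fcloop/ctl/add_remote_port
 */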
static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
        int ret;

        fcloop_class = class_create(THIS_MODULE, "fcloop");
        if (IS_ERR(fcloop_class)) {
                pr_err("couldn't register class fcloop\n");
                ret = PTR_ERR(fcloop_class);
                return ret;
        }

        fcloop_device = device_create_with_groups(
                                fcloop_class, NULL, MKDEV(0, 0), NULL,
                                fcloop_dev_attr_groups, "ctl");
        if (IS_ERR(fcloop_device)) {
                pr_err("couldn't create ctl device!\n");
                ret = PTR_ERR(fcloop_device);
                goto out_destroy_class;
        }

        get_device(fcloop_device);

        return 0;

out_destroy_class:
        class_destroy(fcloop_class);
        return ret;
}
static void __exit fcloop_exit(void)
{
        struct fcloop_lport *lport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        for (;;) {
                nport = list_first_entry_or_null(&fcloop_nports,
                                                typeof(*nport), nport_list);
                if (!nport)
                        break;

                tport = __unlink_target_port(nport);
                rport = __unlink_remote_port(nport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __targetport_unreg(nport, tport);
                if (ret)
                        pr_warn("%s: Failed deleting target port\n", __func__);

                ret = __remoteport_unreg(nport, rport);
                if (ret)
                        pr_warn("%s: Failed deleting remote port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        for (;;) {
                lport = list_first_entry_or_null(&fcloop_lports,
                                                typeof(*lport), lport_list);
                if (!lport)
                        break;

                __unlink_local_port(lport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __wait_localport_unreg(lport);
                if (ret)
                        pr_warn("%s: Failed deleting local port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        put_device(fcloop_device);

        device_destroy(fcloop_class, MKDEV(0, 0));
        class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");