/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};
struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
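/*
 * Summary of the option masks above, derived from the parsing code in
 * this file: add_local_port and add_target_port require wwnn and wwpn;
 * add_remote_port additionally requires the lpwwnn/lpwwpn of the local
 * port it attaches to. roles and fcaddr are accepted but optional.
 */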
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
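/*
 * Initiator-side I/O state (tfcp_req->inistate) transitions, all made
 * under tfcp_req->reqlock:
 *   INI_IO_START -> INI_IO_ACTIVE     in fcloop_fcp_recv_work()
 *   INI_IO_START or INI_IO_ACTIVE -> INI_IO_ABORTED in fcloop_fcp_abort()
 *   any state    -> INI_IO_COMPLETED  in fcloop_tgt_fcprqst_done_work()
 */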
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}
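/*
 * Reference model for struct fcloop_fcpreq: fcloop_fcp_req() takes the
 * initial reference with kref_init() and fcloop_fcp_abort() takes an
 * extra one for the abort work item. Each fcloop_call_host_done() call
 * drops one reference; the final put frees the request in
 * fcloop_tfcp_req_free().
 */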
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}
/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
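/*
 * The loopback "DMA" below is a pair of scatterlist walks: the first
 * loop consumes 'offset' bytes of the host-side (io) list, the second
 * memcpy()s min(io_len, data_len, length) sized chunks between the
 * target-side and host-side lists, advancing whichever element is
 * exhausted.
 */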
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}
#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	static struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fclopp_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fclopp_dev_attrs_group,
	NULL,
};
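/*
 * Illustrative usage of the sysfs control files created below (the WWN
 * values here are made-up examples):
 *
 *	echo "wwnn=0x10001000,wwpn=0x20002000" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x10001001,wwpn=0x20002001,lpwwnn=0x10001000,lpwwpn=0x20002000" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 *	echo "wwnn=0x10001001,wwpn=0x20002001" > \
 *		/sys/class/fcloop/ctl/add_target_port
 *
 * The del_* files take "wwnn=...,wwpn=..." pairs parsed by
 * fcloop_parse_nm_options().
 */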
static struct class *fcloop_class;
static struct device *fcloop_device;
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");