// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};
struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
	struct nvme_fc_local_port	*localport;
	struct list_head		lport_list;
	struct completion		unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport		*lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport		*rport;
	struct fcloop_tport		*tport;
	struct fcloop_lport		*lport;
	struct list_head		nport_list;
	struct kref			ref;
	u64				node_name;
	u64				port_name;
	u32				port_role;
	u32				port_id;
};
struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
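/*
 * Summary of the initiator-side I/O state machine as driven by the
 * handlers below (added for clarity; the individual functions are the
 * authoritative description of the transitions):
 *
 *	INI_IO_START   -> INI_IO_ACTIVE     fcloop_fcp_recv_work()
 *	INI_IO_START/
 *	INI_IO_ACTIVE  -> INI_IO_ABORTED    fcloop_fcp_abort()
 *	(any)          -> INI_IO_COMPLETED  fcloop_tgt_fcprqst_done_work()
 */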
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};
static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}
static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}
static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle is ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
	}

	return 0;
}
static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}
/*
 * Simulate reception of an RSCN and converting it to an initiator
 * transport call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}
static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;
/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}
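/*
 * Worked example (illustrative): with drop_opcode = 0x02,
 * drop_instance = 2 and drop_amount = 1 (i.e. "0x02:2:2" written to
 * set_cmd_drop), the first matching command bumps drop_current_cnt to 1
 * and passes; the second and third are dropped; once drop_current_cnt
 * reaches drop_instance + drop_amount, the filter disarms itself by
 * resetting drop_opcode to -1.
 */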
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else {
		if (likely(!check_for_drop(tfcp_req)))
			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
		else
			pr_info("%s: dropped command ********\n", __func__);
	}
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}
/*
 * FCP IO operation done by target completion.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}
static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}
#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts) {
		ret = -ENOMEM;
		goto out_free_lport;
	}

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	if (!ret)
		wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}
static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
		drop_opcode, drop_amount);

	return count;
}
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	&dev_attr_set_cmd_drop.attr,
	NULL
};
static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};
static struct class *fcloop_class;
static struct device *fcloop_device;
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport = NULL;
	struct fcloop_nport *nport = NULL;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");