// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

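/*
 * For reference, an options string as parsed above is a comma-separated
 * list of the tokens in opt_tokens, e.g. (the values here are made up):
 *
 *	"wwnn=0x20000090fdd98594,wwpn=0x10000090fdd98594,roles=3,fcaddr=0x61400"
 *
 * wwnn/wwpn/lpwwnn/lpwwpn parse as u64, roles as int, fcaddr as hex.
 */
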
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

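/*
 * Mask usage, for reference: add_local_port and add_target_port require
 * wwnn+wwpn (LPORT_OPTS/TGTPORT_OPTS); add_remote_port additionally
 * requires lpwwnn+lpwwpn (RPORT_OPTS), naming the local port it attaches to.
 */
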
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

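/*
 * Initiator-side I/O state, for reference: fcloop_fcp_req() starts an I/O
 * at INI_IO_START; fcloop_fcp_recv_work() moves it to INI_IO_ACTIVE;
 * fcloop_fcp_abort() moves START/ACTIVE to INI_IO_ABORTED; and
 * fcloop_tgt_fcprqst_done_work() marks it INI_IO_COMPLETED. Any other
 * transition trips a WARN_ON in the work handlers.
 */
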
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));

	tgt_lsreq->done(tgt_lsreq);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}

/*
 * Simulate reception of an RSCN and convert it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);

	return;
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP I/O operation done by target completion.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

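/*
 * For reference: the first loop above only advances io_sg to skip "offset"
 * bytes; the second copies min(io_len, data_len, length) sized chunks,
 * advancing whichever scatterlist ran out. WRITEDATA copies host->target
 * (io_sg to data_sg); the other data ops copy target->host.
 */
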
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

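/*
 * Illustrative use from userspace (the wwnn/wwpn values are made-up
 * examples; any values accepted by fcloop_parse_options() work). Ports are
 * wired together by matching wwnn/wwpn, with lpwwnn/lpwwpn naming the
 * local port a remote port attaches to:
 *
 *	echo "wwnn=0x20000090fdd98594,wwpn=0x10000090fdd98594" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x20000090fdd98595,wwpn=0x10000090fdd98595" > \
 *		/sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x20000090fdd98595,wwpn=0x10000090fdd98595,"\
 *	"lpwwnn=0x20000090fdd98594,lpwwpn=0x10000090fdd98594" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 */
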
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");