/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256
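
/*
 * Per-request state shared between the host and target halves of the
 * loopback path.  first_sgl is a flexible array member; blk-mq allocates
 * room behind it for an inline scatterlist via the tag set's cmd_size.
 */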
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}
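
/*
 * Queue 0 is the admin queue and draws its tags from the admin tag set;
 * I/O queue i maps to tag_set.tags[i - 1] because the I/O tag set does
 * not cover the admin queue.
 */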
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
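
/*
 * Target-side completion path: the target calls ->queue_response() with
 * the completion entry, and we find the originating host request by its
 * blk-mq tag (the NVMe command_id) in order to complete it.
 */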
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_DONE;
}
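
/*
 * Host-side ->queue_rq(): set up the NVMe command, hand it to the target
 * as an nvmet_req backed by the same iod, map the request's scatterlist
 * for the target to use directly, and defer execution to a work item so
 * the target runs in process context.
 */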
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}
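
/*
 * AEN commands are submitted with command_id NVME_AQ_BLK_MQ_DEPTH, above
 * any tag blk-mq can allocate from the admin tag set, which is how the
 * completion path above recognizes them without a struct request.
 */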
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}
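
/*
 * Requests from the I/O tag set belong to queues[hctx_idx + 1], since
 * queues[0] is reserved for the admin queue.
 */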
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}
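
/*
 * Admin queue bring-up: initialize the target-side SQ, allocate the admin
 * tag set and request queue, issue the fabrics Connect, then read CAP,
 * enable the controller and identify it.  Errors unwind in reverse order
 * through the labels at the bottom.
 */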
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
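
/*
 * Teardown: drain and destroy the I/O queues first, shut the controller
 * down if it is still live, then cancel anything left on the quiesced
 * admin queue before destroying it.
 */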
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}
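
/*
 * Reset tears the controller down completely and reconnects.  If any
 * stage of the reconnect fails, the controller is uninitialized and
 * released rather than retried.
 */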
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};
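
/*
 * I/O queue bring-up mirrors the admin path: target SQs first, then the
 * blk-mq tag set and connect_q, then a fabrics Connect on each queue.
 * A tag is reserved for the fabrics Connect command itself.
 */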
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}
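
/*
 * Controller creation: allocate and register the nvme_ctrl, bind it to a
 * target port, bring up the admin queue, clamp queue_size to the
 * controller's MAXCMD, then create and connect the I/O queues.
 */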
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};
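
/*
 * Register both halves of the loopback transport: the target side with
 * the nvmet core and the host side with the fabrics core, unwinding the
 * first if the second fails.
 */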
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */