/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/t10-pi.h>
#include "../host/nvme.h"
#include "../host/fabrics.h"
#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
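
/*
 * The blk-mq admin queue is sized to NVME_LOOP_AQ_BLKMQ_DEPTH, so tag values
 * at or above it are never used by regular requests.  The single AEN command
 * is issued with command_id == NVME_LOOP_AQ_BLKMQ_DEPTH, which is how
 * nvme_loop_queue_response() recognizes it without a backing struct request.
 */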
struct nvme_loop_iod {
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};
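
/*
 * One nvme_loop_iod lives in the blk-mq PDU of each request (see the
 * cmd_size setup of the tag sets below); first_sgl[] is the inline headroom
 * for the chained scatterlist allocated in nvme_loop_queue_rq().
 */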
struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};
static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;
static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}
static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
{
	struct nvme_loop_iod *iod =
		container_of(nvme_req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
	} else {
		struct request *req = blk_mq_rq_from_pdu(iod);

		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
			memcpy(req->special, cqe, sizeof(*cqe));
		blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
	}
}
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			req->nr_phys_segments, iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
	}

	iod->cmd.common.command_id = req->tag;
	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_MQ_RQ_QUEUE_OK;
}
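
/*
 * Data path summary: nvme_loop_queue_rq() builds the command in the
 * per-request iod, hands it to the target side via nvmet_req_init(), and
 * defers execution to nvme_loop_execute_work() in workqueue context.  The
 * target completes the command through nvme_loop_queue_response(), which
 * ends the blk-mq request via blk_mq_complete_request() and
 * nvme_loop_complete_rq().
 */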
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	BUG_ON(queue_idx >= ctrl->queue_count);

	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}
static int nvme_loop_init_request(void *data, struct request *req,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}
static struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};
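
/*
 * The admin and I/O tag sets share queue_rq/complete/timeout; they differ
 * only in the init_request/init_hctx callbacks, which bind the request PDU
 * and hardware context to queue 0 (admin) or to queues[hctx_idx + 1] (I/O).
 */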
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
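
/*
 * Admin queue bring-up order, as implemented above: initialize the target
 * side submission queue for queue 0, allocate the admin tag set and request
 * queue, issue the fabrics Connect, read CAP, enable the controller, run
 * Identify, then start keep-alive.  The error labels unwind in reverse order.
 */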
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);

		for (i = 1; i < ctrl->queue_count; i++)
			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}
static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_remove_namespaces(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}
static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}
static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, reset_work);
	bool changed;
	int i, ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_free_queues;

		ctrl->queue_count++;
	}

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_free_queues;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_free_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_remove_namespaces(&ctrl->ctrl);
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}
static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret, i;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret || !opts->nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
		opts->nr_io_queues);

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_cleanup_connect_q;
	}

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);

	return ret;
}
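
/*
 * I/O queues mirror the admin path: the per-queue nvmet submission queues
 * are initialized first, then the shared I/O tag set and connect_q are
 * allocated, and finally a fabrics Connect is issued on each queue.
 */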
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}
static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so
	 * there are no connection rejections when a
	 * subsystem is assigned to a port for which
	 * loop doesn't have a pointer.
	 * This scenario would be possible if we allowed
	 * more than one port to be added and a subsystem
	 * was assigned to a port other than nvmet_loop_port.
	 */

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}
static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};
static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;
	nvmf_register_transport(&nvme_loop_transport);
	return 0;
}
static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
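
/*
 * Rough usage sketch (not part of the driver): with this module and the
 * nvmet core loaded, a loopback target is typically wired up through the
 * nvmet configfs tree and then connected to from the host side with
 * nvme-cli.  The subsystem name and backing device below are illustrative:
 *
 *   cd /sys/kernel/config/nvmet
 *   mkdir subsystems/testnqn
 *   echo 1 > subsystems/testnqn/attr_allow_any_host
 *   mkdir subsystems/testnqn/namespaces/1
 *   echo -n /dev/sdb > subsystems/testnqn/namespaces/1/device_path
 *   echo 1 > subsystems/testnqn/namespaces/1/enable
 *
 *   mkdir ports/1
 *   echo loop > ports/1/addr_trtype
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *         ports/1/subsystems/testnqn
 *
 *   nvme connect -t loop -n testnqn
 */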