// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock is obtained,
 * while reading (populating the discovery log page or checking a
 * host-subsystem link) takes the read lock to allow concurrent readers.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
DECLARE_RWSEM(nvmet_ana_sem);

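/*
 * Illustrative usage only (not part of the original code): configuration
 * updates take the semaphore for write, discovery log readers take it for
 * read, e.g.:
 *
 *	down_write(&nvmet_config_sem);
 *	... link/unlink subsystems, update allowed hosts ...
 *	up_write(&nvmet_config_sem);
 *
 *	down_read(&nvmet_config_sem);
 *	... walk the configuration to populate a discovery log page ...
 *	up_read(&nvmet_config_sem);
 */
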
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	u16 status;

	switch (errno) {
	case 0:
		status = NVME_SC_SUCCESS;
		break;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		break;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		break;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		status = NVME_SC_ACCESS_DENIED;
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
	}

	return status;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

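/*
 * Build the AEN completion result dword: the low byte carries the event
 * type, byte 1 the event information and byte 2 the associated log page
 * identifier, matching the Asynchronous Event Request completion layout.
 */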
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, status);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
	if (cmd_seen) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */
		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;
	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
					lockdep_is_held(&subsys->lock)) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	ns->buffered_io = false;

	return ns;
}

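/*
 * Advance the submission queue head reported to the host.  Multiple requests
 * on the same queue can complete concurrently, so the head is advanced with
 * a cmpxchg() retry loop instead of taking a lock.
 */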
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

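/*
 * Record the failure in the controller's error log.  The slots array is used
 * as a ring indexed by err_counter, and the "more" bit in the completion
 * status tells the host that additional error information is available via
 * the Error Information log page.
 */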
static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (ctrl) {
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);

	return nvmet_bdev_parse_io_cmd(req);
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	trace_nvmet_req_init(req, req->cmd);

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->cmd_seen = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

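/*
 * Rough, illustrative call sequence for a transport driver handling one
 * command (details differ per transport; names below are placeholders, not
 * part of this file):
 *
 *	if (!nvmet_req_init(req, cq, sq, &my_transport_ops))
 *		return;			// already completed with an error
 *	if (nvmet_req_alloc_sgl(req))	// set up the data buffer
 *		goto fail;
 *	req->execute(req);		// parsed handler; it eventually calls
 *					// nvmet_req_complete()
 */
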
void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_data_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = NULL;

	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
		if (req->sq->ctrl && req->ns)
			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
						    req->ns->nsid);

		req->p2p_dev = NULL;
		if (req->sq->qid && p2p_dev) {
			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
						       req->transfer_len);
			if (req->sg) {
				req->p2p_dev = p2p_dev;
				return 0;
			}
		}

		/*
		 * If no P2P memory was available we fallback to using
		 * regular memory
		 */
	}

	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
	if (unlikely(!req->sg))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);

void nvmet_req_free_sgl(struct nvmet_req *req)
{
	if (req->p2p_dev)
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
	else
		sgl_free(req->sg);

	req->sg = NULL;
	req->sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);

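/*
 * The helpers below extract individual fields from the Controller
 * Configuration (CC) register value written by the host: EN (bit 00),
 * CSS (06:04), MPS (10:07), AMS (13:11), SHN (15:14), IOSQES (19:16)
 * and IOCQES (23:20).
 */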
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
				lockdep_is_held(&ctrl->subsys->lock))
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status;
	int ret;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	if (subsys->cntlid_min > subsys->cntlid_max)
		goto out_free_sqs;

	ret = ida_simple_get(&cntlid_ida,
			subsys->cntlid_min, subsys->cntlid_max,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

*ctrl
)
1377 kref_put(&ctrl
->ref
, nvmet_ctrl_free
);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return ERR_PTR(-EINVAL);
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return ERR_PTR(-ENOMEM);
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree_rcu(subsys->model, rcuhead);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");