// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
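/*
 * buffered_io_wq services file-backed namespaces configured for buffered I/O,
 * nvmet_transports holds the registered fabrics transport drivers, and
 * cntlid_ida hands out controller IDs within each subsystem's
 * [cntlid_min, cntlid_max] range.
 */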
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	u16 status;

	switch (errno) {
	case 0:
		status = NVME_SC_SUCCESS;
		break;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		break;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		break;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		status = NVME_SC_ACCESS_DENIED;
		break;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
	}

	return status;
}
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}
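/*
 * xa_for_each() visits the namespaces XArray in increasing index order, so
 * the last entry seen below carries the highest NSID still present in the
 * subsystem.
 */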
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	unsigned long nsid = 0;
	struct nvmet_ns *cur;
	unsigned long idx;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}
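/*
 * Build completion queue entry dword 0 for an Async Event Request: the event
 * type goes in the low byte, the event information in byte 1 and the log page
 * identifier in byte 2.
 */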
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
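/*
 * Fail every outstanding Async Event Request command with an internal error;
 * used when the admin queue is torn down so that no AER stays pending.
 */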
static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, status);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
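/*
 * Record a namespace in the controller's Changed Namespace List log. Once
 * more than NVME_MAX_CHANGED_NAMESPACES distinct namespaces have changed,
 * the list collapses to a single 0xffffffff entry as the spec requires.
 */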
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}
void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}
void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}
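/*
 * nvmet_enable_port() relies on transport modules being loadable via the
 * "nvmet-transport-%d" module alias: if no ops are registered for the port's
 * transport type it drops the config semaphore, calls request_module() and
 * retries the lookup once before failing.
 */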
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}
void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
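/*
 * Traffic-based keep-alive: if any command arrived during the last keep-alive
 * period the timer is simply re-armed instead of treating the host as dead.
 */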
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
	if (cmd_seen) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
	if (ns)
		percpu_ref_get(&ns->ref);

	return ns;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}
void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}
static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}
static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * set up the controller when the port's device is available.
		 */
		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}
void nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	if (oldsize != ns->size)
		nvmet_ns_changed(ns->subsys, ns->nsid);
}
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_passthru_ctrl(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}
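/*
 * sq->sqhd is advanced with a cmpxchg() loop so that completions posted
 * concurrently from several contexts never lose an increment.
 */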
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
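/*
 * Record a failed command in the controller's circular Error Information log
 * (ctrl->slots, indexed by err_counter) and flag the completion with the
 * "More" bit so the host knows a log entry is available.
 */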
static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}
static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (ctrl) {
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}
int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
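/*
 * Map the port's ANA state for the namespace's group to a path-related status
 * code; the accessible (optimized and non-optimized) states return success.
 */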
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}
static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_io_cmd(req);

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->cmd_seen = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}
static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}
	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}
static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
		return false;

	if (req->sq->ctrl && req->sq->qid && req->ns) {
		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
						 req->ns->nsid);
		if (req->p2p_dev)
			return true;
	}

	req->p2p_dev = NULL;
	return false;
}
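/*
 * Peer-to-peer memory is only used for I/O queue commands whose namespace has
 * a P2P device mapped in the controller's p2p_ns_map; everything else falls
 * back to a regular sgl_alloc() below.
 */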
int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
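/*
 * CC.EN 0 -> 1 transition: validate the queue entry sizes, memory page size,
 * arbitration mechanism and command set selected by the host before reporting
 * CSTS.RDY, otherwise flag a controller fatal status.
 */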
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller. Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}
bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
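/*
 * Allocate a controller for a Fabrics Connect command: validate the subsystem
 * and host NQNs, pick a controller ID from the subsystem's cntlid range and
 * start the keep-alive timer before linking the controller to the subsystem.
 */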
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_changed_ns_list;

	if (subsys->cntlid_min > subsys->cntlid_max)
		goto out_free_changed_ns_list;

	ret = ida_simple_get(&cntlid_ida,
			     subsys->cntlid_min, subsys->cntlid_max,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}
static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return ERR_PTR(-EINVAL);
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return ERR_PTR(-ENOMEM);
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}
static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree_rcu(subsys->model, rcuhead);
	kfree(subsys);
}
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}
static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}
module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");