// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"
#include "debugfs.h"
struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
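
/*
 * Map a Linux errno returned by a backend to an NVMe status code, recording
 * the command field that most likely caused the failure in req->error_loc so
 * it can later be reported through the error log page.
 */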
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
	}
}
u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
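
/*
 * Helpers for moving data between a driver-provided buffer and the request's
 * scatter-gather list. A short transfer is treated as an SGL error and the
 * offending field (dptr) is recorded for error reporting.
 */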
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}
static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
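
/*
 * Asynchronous Event Request (AER) handling: queued events are matched with
 * outstanding AER commands under ctrl->lock and completed with the lock
 * dropped, since completing a request calls back into the transport.
 */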
static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}
void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}
void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}
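
/*
 * Port enable/disable. Enabling a port looks up the transport by trtype
 * (loading the "nvmet-transport-%d" module on demand), validates T10-PI
 * support, and hands the port to the transport via ->add_port().
 */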
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	/*
	 * If the transport didn't set the max_queue_size properly, then clamp
	 * it to the target limits. Also set default values in case the
	 * transport didn't set it at all.
	 */
	if (port->max_queue_size < 0)
		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
	else
		port->max_queue_size = clamp_t(int, port->max_queue_size,
					       NVMET_MIN_QUEUE_SIZE,
					       NVMET_MAX_QUEUE_SIZE);

	port->enabled = true;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}
void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
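
/*
 * Resolve the NSID of the current command to a nvmet_ns and take a percpu
 * reference on it; the reference is dropped again in __nvmet_req_complete().
 */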
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);

	req->ns = xa_load(&subsys->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		if (nvmet_subsys_nsid_exists(subsys, nsid))
			return NVME_SC_INTERNAL_PATH_ERROR;
		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}
static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */
		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}
bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}
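
/*
 * Namespace enable/disable. Enabling attaches the block device or file
 * backend, sets up optional peer-to-peer memory and persistent reservation
 * state, and publishes the namespace in the subsystem's XArray; disabling
 * reverses those steps and waits for all outstanding references to drop.
 */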
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	if (ns->pr.enable) {
		ret = nvmet_pr_init_ns(ns);
		if (ret)
			goto out_remove_from_subsys;
	}

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_remove_from_subsys:
	xa_erase(&subsys->namespaces, ns->nsid);
out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	if (ns->pr.enable)
		nvmet_pr_exit_ns(ns);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}
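
/*
 * Completion-path helpers: advance the (virtual) submission queue head and,
 * on failure, record an entry in the controller's error log slots.
 */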
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}
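
/*
 * Complete a request back to the transport: fill in the CQE (sq_head, sq_id,
 * command id, status), trace it, and hand it to ->queue_response(). Any
 * namespace or per-controller reservation reference taken while parsing the
 * command is released here.
 */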
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;
	struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);

	if (pc_ref)
		nvmet_pr_put_ns_pc_ref(pc_ref);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}
static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	/*
	 * We must reference the ctrl again after waiting for inflight IO
	 * to complete, because an admin connect may have sneaked in after we
	 * stored sq->ctrl locally but before we killed the percpu_ref. The
	 * admin connect allocates and assigns sq->ctrl, which now needs a
	 * final ref put, as this ctrl is going away.
	 */
	ctrl = sq->ctrl;

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}
static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}
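
/*
 * Dispatch an I/O command: fabrics and passthru commands are handed off
 * directly; everything else is resolved to a namespace, checked against ANA
 * state, write protection and reservations, and then parsed by the file,
 * block device or zoned backend depending on the namespace CSI.
 */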
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->pr.enable) {
		ret = nvmet_parse_pr_cmd(req);
		if (!ret)
			return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			ret = nvmet_file_parse_io_cmd(req);
		else
			ret = nvmet_bdev_parse_io_cmd(req);
		break;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			ret = nvmet_bdev_zns_parse_io_cmd(req);
		else
			ret = NVME_SC_INVALID_IO_CMD_SET;
		break;
	default:
		ret = NVME_SC_INVALID_IO_CMD_SET;
	}
	if (ret)
		return ret;

	if (req->ns->pr.enable) {
		ret = nvmet_pr_check_cmd_access(req);
		if (ret)
			return ret;

		ret = nvmet_pr_get_ns_pc_ref(req);
	}
	return ret;
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->result.u64 = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;
	req->pc_ref = NULL;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->reset_tbkas = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->pc_ref)
		nvmet_pr_put_ns_pc_ref(req->pc_ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
		return false;
	}

	return true;
}
static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}
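
/*
 * Data buffer allocation: scatterlists are allocated either from regular
 * memory or, when a P2P DMA device was matched for this controller/namespace
 * pair, from peer-to-peer memory on that device.
 */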
static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}

	req->p2p_dev = p2p_dev;

	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}
static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
	    !req->sq->ctrl || !req->sq->qid || !req->ns)
		return NULL;
	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}
int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
		req->p2p_dev = NULL;
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
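
/*
 * Accessors for the individual fields of the Controller Configuration (CC)
 * register written by the host: enable, command set, page size, arbitration,
 * shutdown notification and queue entry sizes.
 */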
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static inline bool nvmet_css_supported(u8 cc_css)
{
	switch (cc_css << NVME_CC_CSS_SHIFT) {
	case NVME_CC_CSS_NVM:
	case NVME_CC_CSS_CSI:
		return true;
	default:
		return false;
	}
}
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller. Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* Controller supports one or more I/O Command Sets */
	ctrl->cap |= (1ULL << 43);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	if (ctrl->ops->get_max_queue_size)
		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
				   ctrl->port->max_queue_size) - 1;
	else
		ctrl->cap |= ctrl->port->max_queue_size - 1;

	if (nvmet_is_passthru_subsys(ctrl->subsys))
		nvmet_passthrough_override_cap(ctrl);
}
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = NULL;
	struct nvmet_subsys *subsys;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			/* ctrl found */
			goto found;
		}
	}

	ctrl = NULL; /* ctrl not found */
	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);

found:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
out:
	return ctrl;
}
u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
	}

	if (unlikely(!nvmet_check_auth_status(req))) {
		pr_warn("qid %d not authenticated\n", req->sq->qid);
		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
	}

	return 0;
}
bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
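
/*
 * Allocate and initialize a controller for a connect request: validate the
 * subsystem and host NQNs, allocate a controller ID from cntlid_ida, set up
 * AER and keep-alive infrastructure, and link the controller into the
 * subsystem before returning it to the fabrics connect handler.
 */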
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
		uuid_t *hostid)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status;
	int ret;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out;
	}

	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	ctrl->port = req->port;
	ctrl->ops = req->ops;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	/* Loop targets clear the reported IDs by default. */
	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
		subsys->clear_ids = 1;
#endif

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
	nvmet_init_cap(ctrl);
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_changed_ns_list;

	ret = ida_alloc_range(&cntlid_ida,
			subsys->cntlid_min, subsys->cntlid_max,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	uuid_copy(&ctrl->hostid, hostid);

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	ret = nvmet_ctrl_init_pr(ctrl);
	if (ret)
		goto init_pr_fail;
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	nvmet_debugfs_ctrl_setup(ctrl);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

init_pr_fail:
	mutex_unlock(&subsys->lock);
	nvmet_stop_keep_alive_timer(ctrl);
	ida_free(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
	kfree(ctrl->sqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}
static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_ctrl_destroy_pr(ctrl);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	nvmet_destroy_auth(ctrl);

	nvmet_debugfs_ctrl_free(ctrl);

	ida_free(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		queue_work(nvmet_wq, &ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len)
{
	if (!ctrl->ops->host_traddr)
		return -EOPNOTSUPP;
	return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
}
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
		if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
			up_read(&nvmet_config_sem);
			return nvmet_disc_subsys;
		}
	}
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;
	char serial[NVMET_SN_MAX_SIZE / 2];
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&serial, sizeof(serial));
	bin2hex(subsys->serial, &serial, sizeof(serial));

	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
	if (!subsys->model_number) {
		ret = -ENOMEM;
		goto free_subsys;
	}

	subsys->ieee_oui = 0;

	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
	if (!subsys->firmware_rev) {
		ret = -ENOMEM;
		goto free_mn;
	}

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
	case NVME_NQN_CURR:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		ret = -EINVAL;
		goto free_fr;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		ret = -ENOMEM;
		goto free_fr;
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	ret = nvmet_debugfs_subsys_setup(subsys);
	if (ret)
		goto free_subsysnqn;

	return subsys;

free_subsysnqn:
	kfree(subsys->subsysnqn);
free_fr:
	kfree(subsys->firmware_rev);
free_mn:
	kfree(subsys->model_number);
free_subsys:
	kfree(subsys);
	return ERR_PTR(ret);
}
static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	nvmet_debugfs_subsys_free(subsys);

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree(subsys->model_number);
	kfree(subsys->firmware_rev);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
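
/*
 * Module init/exit: set up the bvec cache and the zbd, buffered-io and core
 * workqueues, then register the discovery subsystem, debugfs and configfs
 * interfaces; teardown happens in the reverse order.
 */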
static int __init nvmet_init(void)
{
	int error = -ENOMEM;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!nvmet_bvec_cache)
		return -ENOMEM;

	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
	if (!zbd_wq)
		goto out_destroy_bvec_cache;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq)
		goto out_free_zbd_work_queue;

	nvmet_wq = alloc_workqueue("nvmet-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0);
	if (!nvmet_wq)
		goto out_free_buffered_work_queue;

	error = nvmet_init_discovery();
	if (error)
		goto out_free_nvmet_work_queue;

	error = nvmet_init_debugfs();
	if (error)
		goto out_exit_discovery;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_debugfs;

	return 0;

out_exit_debugfs:
	nvmet_exit_debugfs();
out_exit_discovery:
	nvmet_exit_discovery();
out_free_nvmet_work_queue:
	destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
	destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
	destroy_workqueue(zbd_wq);
out_destroy_bvec_cache:
	kmem_cache_destroy(nvmet_bvec_cache);
	return error;
}
static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_debugfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(nvmet_wq);
	destroy_workqueue(buffered_io_wq);
	destroy_workqueue(zbd_wq);
	kmem_cache_destroy(nvmet_bvec_cache);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");