/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be held,
 * while readers (populating the discovery log page or checking a
 * host-subsystem link) take the read lock to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
DECLARE_RWSEM(nvmet_ana_sem);
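
/*
 * Translate a POSIX error code returned by a backend into an NVMe status
 * code, recording a best-effort error location in the request on the way.
 */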
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	u16 status;

	switch (errno) {
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		break;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		break;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		status = NVME_SC_ACCESS_DENIED;
		break;
	case -EIO:
		/* FALLTHRU */
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
	}

	return status;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
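
/*
 * Helpers for moving data between a request's scatter-gather list and a
 * linear buffer (or zeroing part of the SGL).  They fail with a DNR-marked
 * "SGL invalid data" status if the SGL cannot cover the requested range.
 */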
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
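
/*
 * Deliver pending asynchronous events: pair each queued AEN with an
 * outstanding Asynchronous Event Request command and complete that command
 * with the encoded event result.
 */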
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
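
/*
 * Record a namespace ID in the controller's Changed Namespace List log.
 * Once the list overflows, a single 0xffffffff entry tells the host to
 * rescan all namespaces instead.
 */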
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
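
/*
 * Bring a port up: look up the fabrics ops for the port's transport type,
 * loading the transport module on demand if it is not registered yet, and
 * hand the port to the transport's add_port() callback.
 */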
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
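
/*
 * Traffic-based keep-alive: when the timer fires, rearm it if any command
 * was seen since the last expiration, otherwise treat the host as dead and
 * raise a fatal controller error.
 */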
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
	if (cmd_seen) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}
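
/*
 * Validate that a namespace configured for peer-to-peer DMA can actually
 * use it: the backend must be a block device whose driver supports P2P,
 * and either the explicitly configured p2p device must be reachable or
 * some p2pmem provider must be available for the namespace's device.
 */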
static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret = 0;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * set up the controller when the port's device is available.
		 */
		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}
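
/*
 * Enable a namespace: attach its block-device or file backend, set up
 * peer-to-peer memory if requested, initialize the per-namespace percpu
 * ref, and insert it into the subsystem's nsid-sorted namespace list
 * before notifying all controllers of the change.
 */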
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;
	ret = 0;
	if (ns->enabled)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}
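
/*
 * Advance the submission queue head with a lockless cmpxchg loop so that
 * concurrent completions cannot lose an update, then mirror the new head
 * into the completion queue entry.
 */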
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->rsp->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->rsp->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);
	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
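
/*
 * Map the port's ANA state for the namespace's ANA group onto an NVMe
 * status code; inaccessible, persistent-loss and change states fail the
 * command, any other state lets it proceed.
 */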
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}
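
/*
 * First stage of command processing: initialize the request, validate the
 * common command flags and SGL descriptor type, and dispatch to the
 * connect, I/O, fabrics, discovery or admin parser as appropriate.
 */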
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->rsp->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (unlikely(status))
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->cmd_seen = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

void nvmet_req_execute(struct nvmet_req *req)
{
	if (unlikely(req->data_len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
	} else
		req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
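
/*
 * Allocate the data SGL for a request, preferring peer-to-peer memory from
 * the device cached in the controller's p2p_ns_map for I/O queue commands
 * and falling back to regular host memory otherwise.
 */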
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = NULL;

	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
		if (req->sq->ctrl && req->ns)
			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
						    req->ns->nsid);

		req->p2p_dev = NULL;
		if (req->sq->qid && p2p_dev) {
			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
						       req->transfer_len);
			if (req->sg) {
				req->p2p_dev = p2p_dev;
				return 0;
			}
		}

		/*
		 * If no P2P memory was available we fall back to using
		 * regular memory
		 */
	}

	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
	if (!req->sg)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);

void nvmet_req_free_sgl(struct nvmet_req *req)
{
	if (req->p2p_dev)
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
	else
		sgl_free(req->sg);

	req->sg = NULL;
	req->sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
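
/* Field accessors for the Controller Configuration (CC) register value. */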
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and clean
	 * up in case a host died before it enabled the controller.  Hence,
	 * simply reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
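
/*
 * Apply a host write to the CC register, handling enable, disable and
 * shutdown transitions and mirroring the result into CSTS.
 */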
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
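
/*
 * Look up an existing controller by subsystem NQN, host NQN and controller
 * ID, taking a reference on success; used when additional queues connect
 * to an already allocated controller.
 */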
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}
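
/*
 * Allocate and initialize a new controller for an admin-queue connect:
 * validate the subsystem and host NQNs, set up AEN, error-log and P2P
 * state, allocate the queue arrays and a controller ID, and start the
 * keep-alive timer before publishing the controller on the subsystem.
 */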
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to clean up stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
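
/*
 * Module init/exit: set up the buffered-IO workqueue, the discovery
 * subsystem and the configfs interface, and tear them down in reverse
 * order on failure or unload.
 */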
static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");