// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS

#define NVME_MINORS		(1U << MINORBITS)
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
	/*
	 * Revalidate after unblocking dispatchers that may be holding bd_mutex.
	 */
	revalidate_disk(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive.
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		ret = -EBUSY;
	if (!ret)
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
	return ret;
}
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	default:
		return BLK_STS_IOERR;
	}
}
static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
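
/*
 * Worked example of the delay calculation above: the CRD field is taken from
 * bits 12:11 of the completion status, so crd is always 0..3.  A non-zero crd
 * selects ns->ctrl->crdt[crd - 1], which is multiplied by 100 to form a delay
 * in milliseconds; e.g. crdt[crd - 1] == 3 requeues the request after 300 ms.
 */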
void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	trace_nvme_complete_rq(req);

	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) &&
		    blk_path_error(status)) {
			nvme_failover_req(req);
			return;
		}

		if (!blk_queue_dying(req->q)) {
			nvme_retry_req(req);
			return;
		}
	}

	nvme_trace_bio_complete(req, status);
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	list_del_init(&head->entry);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}
static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail allocating our range, fall back to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}
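
/*
 * Note on the 0's-based fields set up above: both rw.length and
 * write_zeroes.length hold "number of logical blocks minus one".  For
 * example, a 4096-byte request on a namespace with a 512-byte logical block
 * size (lba_shift == 9) yields (4096 >> 9) - 1 == 7.
 */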
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ns *ns = req->rq_disk->private_data;
		struct page *page = req->special_vec.bv_page;

		if (page == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	memset(cmd, 0, sizeof(*cmd));
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
		blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
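
/*
 * A typical caller builds the command on the stack and passes a kernel
 * buffer for the data phase; as a sketch (mirroring nvme_identify_ctrl()
 * further down):
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */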
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
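
/*
 * Scheduling note: when the controller advertises traffic based keep-alive
 * (NVME_CTRL_ATTR_TBKAS) and completions were seen since the last run
 * (ctrl->comp_seen), nvme_keep_alive_work() above simply re-arms the timer
 * instead of sending a Keep Alive command, since regular command completions
 * count as host activity in that mode.
 */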
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
				    NVME_IDENTIFY_DATA_SIZE);
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(*id);
	}

	return error;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
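
/*
 * Example of the encoding used above: the Number of Queues feature takes
 * 0's-based counts, so a request for *count == 8 I/O queues is sent as
 * q_count == 0x00070007 (the same 0's-based value in both halves of the
 * dword).  The controller's reply uses the same convention, which is why 1
 * is added back when computing nr_io_queues.
 */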
#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
								u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
	up_read(&ctrl->namespaces_rwsem);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata,
			cmd.metadata_len, 0, &result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed requests on another controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		struct nvme_ns *ns;

		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}
static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
				  void __user *argp,
				  struct nvme_ns_head *head,
				  int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		ret = nvme_user_cmd(ctrl, NULL, argp);
		break;
	case NVME_IOCTL_ADMIN64_CMD:
		ret = nvme_user_cmd64(ctrl, NULL, argp);
		break;
	default:
		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
		break;
	}
	nvme_put_ctrl(ctrl);
	return ret;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
	case NVME_IOCTL_IO64_CMD:
		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
		break;
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
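
/*
 * Example of the stream-aware granularity above: with 512-byte logical
 * blocks and a namespace reporting sws == 4 and sgs == 16, the discard
 * granularity becomes 512 * 4 * 16 == 32 KiB instead of a single block.
 */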
static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
	u64 max_blocks;

	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		return;
	/*
	 * Even though the NVMe spec explicitly states that MDTS is not
	 * applicable to Write Zeroes ("The restriction does not apply to
	 * commands that do not transfer data between the host and the
	 * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
	 * be more cautious and use the controller's max_hw_sectors value
	 * to configure the maximum sectors for write-zeroes, which is
	 * derived from the controller's MDTS field in nvme_init_identify()
	 * if available.
	 */
	if (ns->ctrl->max_hw_sectors == UINT_MAX)
		max_blocks = (u64)USHRT_MAX + 1;
	else
		max_blocks = ns->ctrl->max_hw_sectors + 1;

	blk_queue_max_write_zeroes_sectors(disk->queue,
					   nvme_lba_to_sect(ns, max_blocks));
}
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
	int ret = 0;

	memset(ids, 0, sizeof(*ids));

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/* Don't treat an error as fatal; we potentially
		 * already have an NGUID or EUI-64.
		 */
		ret = nvme_identify_ns_descs(ctrl, nsid, ids);
		if (ret)
			dev_warn(ctrl->device,
				 "Identify Descriptors failed (%d)\n", ret);
	}
	return ret;
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt;

	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & (1 << 1) && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	} else {
		atomic_bs = bs;
	}
	phys_bs = bs;
	io_opt = bs;
	if (id->nsfeat & (1 << 4)) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs *= 1 + le16_to_cpu(id->npwg);
		/* NOWS = Namespace Optimal Write Size */
		io_opt *= 1 + le16_to_cpu(id->nows);
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	if (ns->ms && !ns->ext &&
	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		nvme_init_integrity(disk, ns->ms, ns->pi_type);
	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
	    ns->lba_shift > PAGE_SHIFT)
		capacity = 0;

	set_capacity(disk, capacity);

	nvme_config_discard(disk, ns);
	nvme_config_write_zeroes(disk, ns);

	if (id->nsattr & (1 << 0))
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

	blk_mq_unfreeze_queue(disk->queue);
}
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	ns->noiob = le16_to_cpu(id->noiob);
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
	/* the PI implementation requires metadata equal t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	if (ns->noiob)
		nvme_set_chunk_size(ns);
	nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
	if (ns->head->disk) {
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
		revalidate_disk(ns->head->disk);
	}
#endif
}
static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
	struct nvme_ns_ids ids;
	int ret = 0;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
	if (ret)
		goto out;

	if (id->ncap == 0) {
		ret = -ENODEV;
		goto free_id;
	}

	__nvme_revalidate_disk(disk, id);
	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
	if (ret)
		goto free_id;

	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
		dev_err(ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		ret = -ENODEV;
	}

free_id:
	kfree(id);
out:
	/*
	 * Only fail the function if we got a fatal error back from the
	 * device, otherwise ignore the error and just move on.
	 */
	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
		ret = 0;
	else if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ret;
}
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	struct nvme_command c;
	int srcu_idx, ret;
	u8 data[16] = { 0, };

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw10 = cpu_to_le32(cdw10);

	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.getgeo		= nvme_getgeo,
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, ctrl->cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min, page_shift = 12;
	int ret;

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, ctrl->cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
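
/*
 * For the default page_shift of 12 used above, the MPS field works out to 0
 * (2^(12 + 0) == 4096 bytes), so CC ends up as the OR of NVME_CC_CSS_NVM,
 * NVME_CC_AMS_RR, NVME_CC_SHN_NONE, NVME_CC_IOSQES, NVME_CC_IOCQES and
 * NVME_CC_ENABLE, exactly as assembled field by field in nvme_enable_ctrl().
 */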
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

static int nvme_configure_acre(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_host_behavior *host;
	int ret;

	/* Don't bother enabling the feature if retry delay is not reported */
	if (!ctrl->crdt[0])
		return 0;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = NVME_ENABLE_ACRE;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
	return ret;
}
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */
	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1) {
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		} else {
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
		}
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
	return ret;
}
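
/*
 * Illustrative sketch, not used by the driver: how an APST table entry is
 * packed from a power state index and the 2% heuristic above.  Waiting
 * 50 * (enlat + exlat) microseconds is the same as total_latency_us / 20
 * milliseconds, so a state with 10000 us of total latency gets an idle
 * transition time of 500 ms and, for state 4, an entry value of
 * (4 << 3) | (500 << 8) == 0x1f420.  The helper name is hypothetical; the
 * real logic lives inline in nvme_configure_apst().
 */
static inline __le64 __maybe_unused nvme_example_apst_entry(int state,
		u64 total_latency_us)
{
	u64 transition_ms = total_latency_us + 19;

	do_div(transition_ms, 20);		/* 50 * us == us / 20 in ms */
	if (transition_ms > (1 << 24) - 1)	/* ITPT is a 24-bit field */
		transition_ms = (1 << 24) - 1;
	return cpu_to_le64((state << 3) | (transition_ms << 8));
}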
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;
	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}
struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to
		 * idle.  LiteON has resolved the problem in future firmware.
		 */
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
	}
};
/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}
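
/*
 * Illustrative sketch, not used by the driver: matching a NUL-terminated
 * quirk pattern against a fixed-size, space-padded Identify field, which is
 * the rule string_matches() implements above.  The buffer contents and the
 * helper name are made up for the example.
 */
static inline bool __maybe_unused nvme_example_string_match(void)
{
	static const char idstr[8] = { 'A', 'B', 'C', ' ', ' ', ' ', ' ', ' ' };

	/* "ABC" matches the prefix and the remainder is all spaces: true */
	return string_matches(idstr, "ABC", sizeof(idstr));
}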
static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}

		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%04x%04x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
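
/*
 * Illustrative sketch, not used by the driver: the layout of the fallback
 * NQN built by nvme_init_subnqn() above -- the fixed prefix plus VID/SSVID in
 * hex, followed by the raw serial-number and model-number bytes from the
 * Identify Controller data.  The IDs below are made up and the helper name
 * is hypothetical; the remainder is cleared just as a stand-in for the real
 * sn/mn copies.
 */
static inline void __maybe_unused nvme_example_fake_subnqn(char *subnqn)
{
	int off = snprintf(subnqn, NVMF_NQN_SIZE,
			   "nqn.2014.08.org.nvmexpress:%04x%04x",
			   0x1234, 0x5678);	/* hypothetical VID/SSVID */

	memset(subnqn + off, 0, NVMF_NQN_SIZE - off);
}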
static void nvme_release_subsystem(struct device *dev)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	if (subsys->instance >= 0)
		ida_simple_remove(&nvme_instance_ida, subsys->instance);
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

	ida_destroy(&subsys->ns_ida);
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sprintf(buf, "%.*s\n",					\
		       (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_ctrl *tmp;

	lockdep_assert_held(&nvme_subsystems_lock);

	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
		if (tmp->state == NVME_CTRL_DELETING ||
		    tmp->state == NVME_CTRL_DEAD)
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device));
			return false;
		}

		if ((id->cmic & (1 << 1)) ||
		    (ctrl->opts && ctrl->opts->discovery_nqn))
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
	}

	return true;
}
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;

	subsys->instance = -1;
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->nsheads);
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;
	subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
	subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
	subsys->dev.groups = nvme_subsys_attrs_groups;
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		put_device(&subsys->dev);
		subsys = found;

		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
			ret = -EINVAL;
			goto out_put_subsystem;
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
			put_device(&subsys->dev);
			goto out_unlock;
		}
		ida_init(&subsys->ns_ida);
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
		goto out_put_subsystem;
	}

	if (!found)
		subsys->instance = ctrl->instance;
	ctrl->subsys = subsys;
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&nvme_subsystems_lock);
	return 0;

out_put_subsystem:
	nvme_put_subsystem(subsys);
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
}
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
		 void *log, size_t size, u64 offset)
{
	struct nvme_command c = { };
	unsigned long dwlen = size / 4 - 1;

	c.get_log_page.opcode = nvme_admin_get_log_page;
	c.get_log_page.nsid = cpu_to_le32(nsid);
	c.get_log_page.lid = log_page;
	c.get_log_page.lsp = lsp;
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
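
/*
 * Illustrative sketch, not used by the driver: how the 0's based dword count
 * built in nvme_get_log() above splits across NUMDL and NUMDU.  For a 16 KiB
 * log page, dwlen = 16384 / 4 - 1 = 4095, so numdl = 4095 and numdu = 0;
 * only transfers larger than 256 KiB spill into NUMDU.  The helper name is
 * hypothetical.
 */
static inline void __maybe_unused nvme_example_log_dwlen(size_t size,
		__le16 *numdl, __le16 *numdu)
{
	unsigned long dwlen = size / 4 - 1;	/* 0's based dword count */

	*numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	*numdu = cpu_to_le16(dwlen >> 16);
}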
static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
	if (!ctrl->effects)
		return 0;

	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
			ctrl->effects, sizeof(*ctrl->effects), 0);
	if (ret) {
		kfree(ctrl->effects);
		ctrl->effects = NULL;
	}
	return ret;
}
/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	int ret, page_shift;
	u32 max_hw_sectors;
	bool prev_apst_enabled;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl);
		if (ret < 0)
			goto out_free;
	}

	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

	if (!ctrl->identified) {
		int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->oncs = le16_to_cpu(id->oncs);
	ctrl->mtfa = le16_to_cpu(id->mtfa);
	ctrl->oaes = le32_to_cpu(id->oaes);
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
	ctrl->ctratt = le32_to_cpu(id->ctratt);

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
			ret = -EINVAL;
			goto out_free;
		}

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init(ctrl, id);
	kfree(id);

	if (ret < 0)
		return ret;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_acre(ctrl);
	if (ret < 0)
		return ret;

	if (!ctrl->identified)
		nvme_hwmon_init(ctrl);

	ctrl->identified = true;

	return 0;

out_free:
	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
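
/*
 * Illustrative sketch, not used by the driver: the MDTS-to-sector conversion
 * performed in nvme_init_identify() above.  MDTS is a power of two in units
 * of the minimum memory page size, so with MPSMIN = 0 (page_shift = 12) and
 * mdts = 5 this yields 1 << (5 + 12 - 9) = 256 sectors, i.e. 128 KiB per
 * transfer; an MDTS of 0 means "no limit".  The helper name is hypothetical.
 */
static inline u32 __maybe_unused nvme_example_mdts_to_sectors(u8 mdts,
		int page_shift)
{
	return mdts ? 1 << (mdts + page_shift - 9) : UINT_MAX;
}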
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
		return -EWOULDBLOCK;
	}

	file->private_data = ctrl;
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_fops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sprintf(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sprintf(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sprintf(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ids->nguid);
	}
	return sprintf(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
#ifdef CONFIG_NVM
	&nvme_nvm_attr_group,
#endif
	NULL,
};
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n",						\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};
static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
			return h;
	}

	return NULL;
}

static int __nvme_check_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_head *new)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (nvme_ns_ids_valid(&new->ids) &&
		    !list_empty(&h->list) &&
		    nvme_ns_ids_equal(&new->ids, &h->ids))
			return -EINVAL;
	}

	return 0;
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns *id)
{
	struct nvme_ns_head *head;
	size_t size = sizeof(*head);
	int ret = -ENOMEM;

#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
	kref_init(&head->ref);

	ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
	if (ret)
		goto out_cleanup_srcu;

	ret = __nvme_check_ids(ctrl->subsys, head);
	if (ret) {
		dev_err(ctrl->device,
			"duplicate IDs for nsid %d\n", nsid);
		goto out_cleanup_srcu;
	}

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
		struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	bool is_shared = id->nmic & (1 << 0);
	struct nvme_ns_head *head = NULL;
	int ret = 0;

	mutex_lock(&ctrl->subsys->lock);
	if (is_shared)
		head = __nvme_find_ns_head(ctrl->subsys, nsid);
	if (!head) {
		head = nvme_alloc_ns_head(ctrl, nsid, id);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
	} else {
		struct nvme_ns_ids ids;

		ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
		if (ret)
			goto out_unlock;

		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	list_add_tail(&ns->siblings, &head->list);
	ns->head = head;

out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ret;
}
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->head->ns_id - nsb->head->ns_id;
}

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!kref_get_unless_zero(&ns->kref))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		unsigned int bs = 1 << ns->lba_shift;

		blk_queue_io_min(ns->queue, bs * ns->sws);
		if (ns->sgs)
			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
	}

	return 0;
}
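
/*
 * Illustrative sketch, not used by the driver: the I/O hints derived above
 * from the Streams directive parameters.  With a 4 KiB LBA (lba_shift = 12),
 * sws = 16 and sgs = 64, io_min becomes 64 KiB and io_opt 4 MiB.  The helper
 * name is hypothetical.
 */
static inline void __maybe_unused nvme_example_stream_hints(unsigned int lba_shift,
		u32 sws, u16 sgs, unsigned int *io_min, unsigned int *io_opt)
{
	unsigned int bs = 1 << lba_shift;	/* logical block size in bytes */

	*io_min = bs * sws;			/* stream write size */
	*io_opt = bs * sws * sgs;		/* stream granularity size */
}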
static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return -ENOMEM;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue)) {
		ret = PTR_ERR(ns->queue);
		goto out_free_ns;
	}

	if (ctrl->opts && ctrl->opts->data_digest)
		ns->queue->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	ret = nvme_identify_ns(ctrl, nsid, &id);
	if (ret)
		goto out_free_queue;

	if (id->ncap == 0) {
		ret = -EINVAL;
		goto out_free_id;
	}

	ret = nvme_init_ns_head(ns, nsid, id);
	if (ret)
		goto out_free_id;
	nvme_setup_streams_ns(ctrl, ns);
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);

	disk = alloc_disk_node(0, node);
	if (!disk) {
		ret = -ENOMEM;
		goto out_unlink_ns;
	}

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = flags;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		ret = nvme_nvm_register(ns, disk_name, node);
		if (ret) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_put_disk;
		}
	}

	down_write(&ctrl->namespaces_rwsem);
	list_add_tail(&ns->list, &ctrl->namespaces);
	up_write(&ctrl->namespaces_rwsem);

	nvme_get_ctrl(ctrl);

	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);

	nvme_mpath_add_disk(ns, id);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
	kfree(id);

	return 0;
 out_put_disk:
	put_disk(ns->disk);
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
 out_free_id:
	kfree(id);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ret;
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	nvme_fault_inject_fini(&ns->fault_inject);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	mutex_unlock(&ns->ctrl->subsys->lock);
	synchronize_rcu(); /* guarantee not available in head->list */
	nvme_mpath_clear_current_path(ns);
	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
	}

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	nvme_mpath_check_last_path(ns);
	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0;
	unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
	int ret = 0;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
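 free:
	kfree(ns_list);
	return ret;
}

/*
 * Illustrative sketch, not used by the driver: how many Identify Namespace
 * List pages the scan above has to fetch.  Each list reports up to 1024
 * active NSIDs, so a controller advertising nn = 2500 namespaces needs
 * DIV_ROUND_UP(2500, 1024) = 3 lists.  The helper name is hypothetical.
 */
static inline unsigned int __maybe_unused nvme_example_num_ns_lists(unsigned int nn)
{
	return DIV_ROUND_UP_ULL((u64)nn, 1024);
}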
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
			log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	/* No tagset on a live ctrl means IO queues could not be created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	mutex_lock(&ctrl->scan_lock);
	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto out_free_id;
	}
	nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
	mutex_unlock(&ctrl->scan_lock);
	kfree(id);
	down_write(&ctrl->namespaces_rwsem);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	up_write(&ctrl->namespaces_rwsem);
}
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
	}
	return ret;
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);
	ctrl->ops->submit_async_event(ctrl);
}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
			sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
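
/*
 * Illustrative sketch, not used by the driver: how the AEN completion dword
 * handled above is decoded.  Bits 2:0 carry the asynchronous event type and,
 * for notices, bits 15:8 carry the notice type; a result of 0x0102 is a
 * notice (type 2) that firmware activation is starting (notice 0x01).  The
 * helper name is hypothetical.
 */
static inline void __maybe_unused nvme_example_decode_aen(u32 result,
		u32 *aer_type, u32 *aer_notice_type)
{
	*aer_type = result & 0x07;
	*aer_notice_type = (result & 0xff00) >> 8;
}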
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (subsys && ctrl->instance != subsys->instance)
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

	kfree(ctrl->effects);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}
/*
 * Initialize an NVMe controller's structures.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));

	return 0;
out_free_name:
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);

	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}
static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);