/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define NVME_MINORS		(1U << MINORBITS)
unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->reset_work);
	return ret;
}
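
/*
 * Translate the NVMe completion status of a finished request into a block
 * layer status code so upper layers see a generic error class.
 */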
static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (jiffies - req->start_time >= req->timeout)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

void nvme_complete_rq(struct request *req)
{
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, true);
		return;
	}

	blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
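
/*
 * Fail an outstanding request during queue teardown: mark it aborted
 * (adding DNR if the queue is dying so it will not be retried) and
 * complete it.
 */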
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	nvme_req(req)->status = status;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			break;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			break;
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
			changed = true;
			break;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			break;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
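
/*
 * Final kref release: detach the gendisk from the namespace and drop the
 * controller reference taken when the namespace was allocated.
 */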
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	if (ns->disk) {
		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);
		put_disk(ns->disk);
	}

	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
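
/*
 * Streams are turned on and off with a Directive Send command against the
 * Identify directive type, applied to all namespaces (NSID 0xffffffff).
 */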
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(0xffffffff);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(sizeof(*s));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;

	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, 0xffffffff);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
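
/* Build a Flush command scoped to this namespace. */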
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}
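
/*
 * Build a read/write command: translate the request's position and length
 * into namespace blocks, merge in FUA/limited-retry/prefetch hints, assign
 * a write stream if available, and set up the protection information
 * fields.
 */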
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	/*
	 * If formatted with metadata, require that the block layer provide a
	 * buffer unless this namespace is formatted such that the metadata
	 * can be stripped/generated by the controller with PRACT=1.
	 */
	if (ns->ms &&
	    (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
	    !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
		return BLK_STS_NOTSUPP;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
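
/*
 * Keep-alive: while the controller advertises a KATO value, a delayed work
 * item periodically sends a Keep Alive command so the controller (notably
 * over fabrics) does not time out the association.
 */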
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
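
/*
 * Issue Identify with the Namespace Identification Descriptor list CNS and
 * walk the returned descriptors to pick up the namespace's EUI-64, NGUID
 * and UUID identifiers.
 */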
static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ns->eui, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ns->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

static int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}
static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
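
/*
 * NVME_IOCTL_SUBMIT_IO: copy a struct nvme_user_io from userspace, build
 * the corresponding read/write/compare command and hand it to
 * __nvme_submit_user_cmd() together with the data and metadata buffers.
 */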
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					 (void __user *) arg);
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
	struct nvme_ns *ns = disk->private_data;
	u16 old_ms = ns->ms;
	u8 pi_type = 0;

	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/* PI implementation requires metadata equal to the t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		pi_type = id->dps & NVME_NS_DPS_PI_MASK;

	if (blk_get_integrity(disk) &&
	    (ns->pi_type != pi_type || ns->ms != old_ms ||
	     bs != queue_logical_block_size(disk->queue) ||
	     (ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
}

static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
}
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));

	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	if (ctrl->nr_streams && ns->sws && ns->sgs) {
		unsigned int sz = logical_block_size * ns->sws * ns->sgs;

		ns->queue->limits.discard_alignment = sz;
		ns->queue->limits.discard_granularity = sz;
	} else {
		ns->queue->limits.discard_alignment = logical_block_size;
		ns->queue->limits.discard_granularity = logical_block_size;
	}
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
}
static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
		return -ENODEV;
	}

	if ((*id)->ncap == 0) {
		kfree(*id);
		return -ENODEV;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
	if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
		/* Don't treat an error as fatal, as we potentially
		 * already have a NGUID or EUI-64.
		 */
		if (nvme_identify_ns_descs(ns, ns->ns_id))
			dev_warn(ns->ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
	}

	return 0;
}
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 bs;

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	ns->noiob = le16_to_cpu(id->noiob);

	blk_mq_freeze_queue(disk->queue);

	if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
		nvme_prep_integrity(disk, id, bs);
	blk_queue_logical_block_size(ns->queue, bs);
	if (ns->noiob)
		nvme_set_chunk_size(ns);
	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id = NULL;
	int ret;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_revalidate_ns(ns, &id);
	if (ret)
		return ret;

	__nvme_revalidate_disk(disk, id);
	kfree(id);

	return 0;
}
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
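
/*
 * Poll CSTS.RDY until it matches the state we are waiting for (set after
 * enabling the controller, clear after disabling it), giving up once the
 * CAP.TO based timeout expires.
 */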
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
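
/*
 * Apply the controller's transfer limits (max_hw_sectors, the stripe size
 * quirk, the page size virt boundary and the volatile write cache setting)
 * to a request queue.
 */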
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}
static void nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us = exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1)
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		else
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
}
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;
	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}
{
1657 * NVMe model and firmware strings are padded with spaces. For
1658 * simplicity, strings in the quirk table are padded with NULLs
1664 unsigned long quirks
;
1667 static const struct nvme_core_quirk_entry core_quirks
[] = {
1670 * This Toshiba device seems to die using any APST states. See:
1671 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
1674 .mn
= "THNSF5256GPUK TOSHIBA",
1675 .quirks
= NVME_QUIRK_NO_APST
,
1679 /* match is null-terminated but idstr is space-padded. */
1680 static bool string_matches(const char *idstr
, const char *match
, size_t len
)
1687 matchlen
= strlen(match
);
1688 WARN_ON_ONCE(matchlen
> len
);
1690 if (memcmp(idstr
, match
, matchlen
))
1693 for (; matchlen
< len
; matchlen
++)
1694 if (idstr
[matchlen
] != ' ')
1700 static bool quirk_matches(const struct nvme_id_ctrl
*id
,
1701 const struct nvme_core_quirk_entry
*q
)
1703 return q
->vid
== le16_to_cpu(id
->vid
) &&
1704 string_matches(id
->mn
, q
->mn
, sizeof(id
->mn
)) &&
1705 string_matches(id
->fr
, q
->fr
, sizeof(id
->fr
));
static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
	if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
		strcpy(ctrl->subnqn, id->subnqn);
		return;
	}

	if (ctrl->vs >= NVME_VS(1, 2, 1))
		dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(ctrl->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%4x%4x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(ctrl->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(ctrl->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(ctrl->subnqn + off, 0, sizeof(ctrl->subnqn) - off);
}
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;
	bool prev_apst_enabled;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	nvme_init_subnqn(ctrl, id);

	if (!ctrl->identified) {
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		int i;

		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect.
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			ret = -EINVAL;
			goto out_free;
		}

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
	}

	kfree(id);

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	nvme_configure_apst(ctrl);
	nvme_configure_directives(ctrl);

	ctrl->identified = true;

	return 0;

out_free:
	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
		return sprintf(buf, "eui.%16phN\n", ns->nguid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%pU\n", ns->nguid);
}
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ns->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ns->nguid);
	}
	return sprintf(buf, "%pU\n", &ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ns->uuid) ||
		    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_RECONNECTING]= "reconnecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};
, struct list_head
*a
, struct list_head
*b
)
2220 struct nvme_ns
*nsa
= container_of(a
, struct nvme_ns
, list
);
2221 struct nvme_ns
*nsb
= container_of(b
, struct nvme_ns
, list
);
2223 return nsa
->ns_id
- nsb
->ns_id
;
2226 static struct nvme_ns
*nvme_find_get_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
2228 struct nvme_ns
*ns
, *ret
= NULL
;
2230 mutex_lock(&ctrl
->namespaces_mutex
);
2231 list_for_each_entry(ns
, &ctrl
->namespaces
, list
) {
2232 if (ns
->ns_id
== nsid
) {
2233 kref_get(&ns
->kref
);
2237 if (ns
->ns_id
> nsid
)
2240 mutex_unlock(&ctrl
->namespaces_mutex
);
2244 static int nvme_setup_streams_ns(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
)
2246 struct streams_directive_params s
;
2249 if (!ctrl
->nr_streams
)
2252 ret
= nvme_get_stream_params(ctrl
, &s
, ns
->ns_id
);
2256 ns
->sws
= le32_to_cpu(s
.sws
);
2257 ns
->sgs
= le16_to_cpu(s
.sgs
);
2260 unsigned int bs
= 1 << ns
->lba_shift
;
2262 blk_queue_io_min(ns
->queue
, bs
* ns
->sws
);
2264 blk_queue_io_opt(ns
->queue
, bs
* ns
->sws
* ns
->sgs
);
2270 static void nvme_alloc_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
2273 struct gendisk
*disk
;
2274 struct nvme_id_ns
*id
;
2275 char disk_name
[DISK_NAME_LEN
];
2276 int node
= dev_to_node(ctrl
->dev
);
2278 ns
= kzalloc_node(sizeof(*ns
), GFP_KERNEL
, node
);
2282 ns
->instance
= ida_simple_get(&ctrl
->ns_ida
, 1, 0, GFP_KERNEL
);
2283 if (ns
->instance
< 0)
2286 ns
->queue
= blk_mq_init_queue(ctrl
->tagset
);
2287 if (IS_ERR(ns
->queue
))
2288 goto out_release_instance
;
2289 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, ns
->queue
);
2290 ns
->queue
->queuedata
= ns
;
2293 kref_init(&ns
->kref
);
2295 ns
->lba_shift
= 9; /* set to a default value for 512 until disk is validated */
2297 blk_queue_logical_block_size(ns
->queue
, 1 << ns
->lba_shift
);
2298 nvme_set_queue_limits(ctrl
, ns
->queue
);
2299 nvme_setup_streams_ns(ctrl
, ns
);
2301 sprintf(disk_name
, "nvme%dn%d", ctrl
->instance
, ns
->instance
);
2303 if (nvme_revalidate_ns(ns
, &id
))
2304 goto out_free_queue
;
2306 if (nvme_nvm_ns_supported(ns
, id
) &&
2307 nvme_nvm_register(ns
, disk_name
, node
)) {
2308 dev_warn(ctrl
->device
, "%s: LightNVM init failure\n", __func__
);
2312 disk
= alloc_disk_node(0, node
);
2316 disk
->fops
= &nvme_fops
;
2317 disk
->private_data
= ns
;
2318 disk
->queue
= ns
->queue
;
2319 disk
->flags
= GENHD_FL_EXT_DEVT
;
2320 memcpy(disk
->disk_name
, disk_name
, DISK_NAME_LEN
);
2323 __nvme_revalidate_disk(disk
, id
);
2325 mutex_lock(&ctrl
->namespaces_mutex
);
2326 list_add_tail(&ns
->list
, &ctrl
->namespaces
);
2327 mutex_unlock(&ctrl
->namespaces_mutex
);
2329 kref_get(&ctrl
->kref
);
2333 device_add_disk(ctrl
->device
, ns
->disk
);
2334 if (sysfs_create_group(&disk_to_dev(ns
->disk
)->kobj
,
2335 &nvme_ns_attr_group
))
2336 pr_warn("%s: failed to create sysfs group for identification\n",
2337 ns
->disk
->disk_name
);
2338 if (ns
->ndev
&& nvme_nvm_register_sysfs(ns
))
2339 pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
2340 ns
->disk
->disk_name
);
2345 blk_cleanup_queue(ns
->queue
);
2346 out_release_instance
:
2347 ida_simple_remove(&ctrl
->ns_ida
, ns
->instance
);
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		if (ns->ndev)
			nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			/* remove namespaces in the gap up to the reported nsid */
			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);

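/*
 * Illustrative sketch (not part of the driver): because of the NVME_CTRL_LIVE
 * check above, callers such as a transport's reconnect-completion path can
 * request a rescan unconditionally; the request is silently dropped while the
 * controller is resetting or being removed.  The function name
 * nvme_example_reconnect_done() is hypothetical.
 */
static void __maybe_unused nvme_example_reconnect_done(struct nvme_ctrl *ctrl)
{
	/* Pick up namespaces that appeared or vanished while we were away. */
	nvme_queue_scan(ctrl);
}
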
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

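/*
 * Illustrative sketch (not part of the driver): since the list walk above is
 * deliberately unlocked, a transport's removal path is expected to quiesce the
 * scan and AEN work first.  The function name nvme_example_remove_ctrl() is
 * hypothetical and transport-specific teardown steps are elided; the helpers
 * called here are the ones exported by this file.
 */
static void __maybe_unused nvme_example_remove_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_stop_ctrl(ctrl);		/* flushes scan_work and async_event_work */
	nvme_remove_namespaces(ctrl);	/* namespace list is stable from here on */
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}
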
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	bool done = true;

	switch (le16_to_cpu(status) >> 1) {
	case NVME_SC_SUCCESS:
		done = false;
		/*FALLTHRU*/
	case NVME_SC_ABORT_REQ:
		++ctrl->event_limit;
		queue_work(nvme_wq, &ctrl->async_event_work);
		break;
	default:
		break;
	}

	if (done)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

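/*
 * Illustrative sketch (not part of the driver): how a transport's completion
 * handler is expected to feed an AEN completion into the core.  The function
 * name nvme_example_handle_aen() is hypothetical and it assumes the caller has
 * already recognized the CQE as belonging to an async event request.
 */
static void __maybe_unused nvme_example_handle_aen(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe)
{
	/* Status stays little-endian; the core does the byte swapping. */
	nvme_complete_async_event(ctrl, cqe->status, &cqe->result);
}
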
void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_queue_async_events(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller's data structures. This needs to be called
 * during the earliest initialization so that we have the initialized
 * structures around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

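/*
 * Illustrative sketch (not part of the driver): the probe-time pairing a
 * transport is expected to use around nvme_init_ctrl().  The function name
 * nvme_example_probe() is hypothetical, the quirks value is arbitrary, and
 * the transport-specific admin/IO queue setup is elided.
 */
static int __maybe_unused nvme_example_probe(struct nvme_ctrl *ctrl,
		struct device *dev, const struct nvme_ctrl_ops *ops)
{
	int ret;

	ret = nvme_init_ctrl(ctrl, dev, ops, 0 /* quirks */);
	if (ret)
		return ret;

	/* ... transport creates admin and IO queues, enables the controller ... */

	nvme_start_ctrl(ctrl);	/* kicks off keep-alive, scanning and AEN work */
	return 0;
}
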
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			continue;
		revalidate_disk(ns->disk);
		blk_set_queue_dying(ns->queue);

		/* Forcibly unquiesce queues to avoid blocking dispatch */
		blk_mq_unquiesce_queue(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

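/*
 * Illustrative sketch (not part of the driver): a teardown path for a
 * controller the transport has given up on.  The function name
 * nvme_example_remove_dead_ctrl() is hypothetical; the point is that the
 * queues are failed before namespace removal so removal cannot block on
 * hardware that will never answer.
 */
static void __maybe_unused nvme_example_remove_dead_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_kill_queues(ctrl);		/* fail all current and future IO */
	nvme_remove_namespaces(ctrl);	/* disks go away without waiting for IO */
}
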
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

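/*
 * Illustrative sketch (not part of the driver): the freeze/quiesce sequence a
 * transport can use around a controller reset.  The function name
 * nvme_example_reset_io() is hypothetical and the queue re-creation step is
 * elided; the ordering shows how the exported freeze and quiesce helpers above
 * are meant to be combined.
 */
static void __maybe_unused nvme_example_reset_io(struct nvme_ctrl *ctrl)
{
	nvme_start_freeze(ctrl);	/* new submitters now block in blk_queue_enter() */
	nvme_stop_queues(ctrl);		/* stop dispatching to the old hardware queues */

	/* ... transport tears down and re-creates its IO queues here ... */

	nvme_start_queues(ctrl);	/* let already-entered requests run again */
	nvme_wait_freeze(ctrl);		/* wait for those requests to complete */
	nvme_unfreeze(ctrl);		/* lift the freeze so new IO can enter */
}
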
int __init nvme_core_init(void)
{
	int result;

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		return -ENOMEM;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto destroy_wq;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
destroy_wq:
	destroy_workqueue(nvme_wq);
	return result;
}

void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);