// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
        "turn on native support for multiple controllers per subsystem");

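/*
 * ANA is only used when the "multipath" module parameter is enabled and the
 * controller advertises ANA reporting support in the CMIC field (bit 3).
 */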
inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
        return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
                        struct nvme_ctrl *ctrl, int *flags)
{
        if (!multipath) {
                sprintf(disk_name, "nvme%dn%d", ctrl->instance,
                                ns->head->instance);
        } else if (ns->head->disk) {
                sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
                                ctrl->instance, ns->head->instance);
                *flags = GENHD_FL_HIDDEN;
        } else {
                sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
                                ns->head->instance);
        }
}

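/*
 * Called when a request failed with a path-related error: the bios are stolen
 * off the failed request onto the ns_head requeue list so they can be retried
 * on another path, and the failed request itself is completed.
 */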
void nvme_failover_req(struct request *req)
{
        struct nvme_ns *ns = req->q->queuedata;
        u16 status = nvme_req(req)->status;
        unsigned long flags;

        spin_lock_irqsave(&ns->head->requeue_lock, flags);
        blk_steal_bios(&ns->head->requeue_list, req);
        spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
        blk_mq_end_request(req, 0);

        switch (status & 0x7ff) {
        case NVME_SC_ANA_TRANSITION:
        case NVME_SC_ANA_INACCESSIBLE:
        case NVME_SC_ANA_PERSISTENT_LOSS:
                /*
                 * If we got back an ANA error, we know the controller is
                 * alive but not ready to serve this namespace.  The spec
                 * suggests we should update our general state here, but due
                 * to the fact that the admin and I/O queues are not
                 * serialized that is fundamentally racy.  So instead just
                 * clear the current path, mark the path as pending and kick
                 * off a re-read of the ANA log page ASAP.
                 */
                nvme_mpath_clear_current_path(ns);
                if (ns->ctrl->ana_log_buf) {
                        set_bit(NVME_NS_ANA_PENDING, &ns->flags);
                        queue_work(nvme_wq, &ns->ctrl->ana_work);
                }
                break;
        case NVME_SC_HOST_PATH_ERROR:
                /*
                 * Temporary transport disruption in talking to the controller.
                 * Try to send on a new path.
                 */
                nvme_mpath_clear_current_path(ns);
                break;
        default:
                /*
                 * Reset the controller for any non-ANA error as we don't know
                 * what caused the error.
                 */
                nvme_reset_ctrl(ns->ctrl);
                break;
        }

        kblockd_schedule_work(&ns->head->requeue_work);
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                if (ns->head->disk)
                        kblockd_schedule_work(&ns->head->requeue_work);
        }
        up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
        [0]                             = "invalid state",
        [NVME_ANA_OPTIMIZED]            = "optimized",
        [NVME_ANA_NONOPTIMIZED]         = "non-optimized",
        [NVME_ANA_INACCESSIBLE]         = "inaccessible",
        [NVME_ANA_PERSISTENT_LOSS]      = "persistent-loss",
        [NVME_ANA_CHANGE]               = "change",
};

void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
        struct nvme_ns_head *head = ns->head;
        int node;

        if (!head)
                return;

        for_each_node(node) {
                if (ns == rcu_access_pointer(head->current_path[node]))
                        rcu_assign_pointer(head->current_path[node], NULL);
        }
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
        return ns->ctrl->state != NVME_CTRL_LIVE ||
                test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
                test_bit(NVME_NS_REMOVING, &ns->flags);
}

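/*
 * Scan all sibling paths and pick the usable one with the lowest NUMA
 * distance from the submitting node (all paths are treated equally when the
 * iopolicy is not "numa").  Optimized paths are preferred; a non-optimized
 * path is only used as a fallback.  The result is cached in
 * head->current_path[node].
 */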
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
        int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
        struct nvme_ns *found = NULL, *fallback = NULL, *ns;

        list_for_each_entry_rcu(ns, &head->list, siblings) {
                if (nvme_path_is_disabled(ns))
                        continue;

                if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
                        distance = node_distance(node, ns->ctrl->numa_node);
                else
                        distance = LOCAL_DISTANCE;

                switch (ns->ana_state) {
                case NVME_ANA_OPTIMIZED:
                        if (distance < found_distance) {
                                found_distance = distance;
                                found = ns;
                        }
                        break;
                case NVME_ANA_NONOPTIMIZED:
                        if (distance < fallback_distance) {
                                fallback_distance = distance;
                                fallback = ns;
                        }
                        break;
                default:
                        break;
                }
        }

        if (!found)
                found = fallback;
        if (found)
                rcu_assign_pointer(head->current_path[node], found);
        return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
                struct nvme_ns *ns)
{
        ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
                        siblings);
        if (ns)
                return ns;
        return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

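/*
 * Round-robin path selection: continue from the previously used path and
 * take the next usable sibling, preferring an optimized path but falling
 * back to a non-optimized one if that is all we can find.
 */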
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
                int node, struct nvme_ns *old)
{
        struct nvme_ns *ns, *found, *fallback = NULL;

        if (list_is_singular(&head->list)) {
                if (nvme_path_is_disabled(old))
                        return NULL;
                return old;
        }

        for (ns = nvme_next_ns(head, old);
             ns != old;
             ns = nvme_next_ns(head, ns)) {
                if (nvme_path_is_disabled(ns))
                        continue;

                if (ns->ana_state == NVME_ANA_OPTIMIZED) {
                        found = ns;
                        goto out;
                }
                if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
                        fallback = ns;
        }

        if (!fallback)
                return NULL;
        found = fallback;
out:
        rcu_assign_pointer(head->current_path[node], found);
        return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
        return ns->ctrl->state == NVME_CTRL_LIVE &&
                ns->ana_state == NVME_ANA_OPTIMIZED;
}

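/*
 * Look up the path to submit to: start from the cached path for this node,
 * let the round-robin selector advance it if that iopolicy is active, and
 * fall back to a full __nvme_find_path() scan if the result is missing or
 * no longer optimized.
 */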
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
        int node = numa_node_id();
        struct nvme_ns *ns;

        ns = srcu_dereference(head->current_path[node], &head->srcu);
        if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
                ns = nvme_round_robin_path(head, node, ns);
        if (unlikely(!ns || !nvme_path_is_optimized(ns)))
                ns = __nvme_find_path(head, node);
        return ns;
}

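/*
 * make_request handler for the multipath node: remap each bio to the disk of
 * the currently selected path and submit it directly.  If no path is usable
 * but paths still exist, park the bio on the requeue list; if the head has no
 * paths at all, fail the bio.
 */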
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
                struct bio *bio)
{
        struct nvme_ns_head *head = q->queuedata;
        struct device *dev = disk_to_dev(head->disk);
        struct nvme_ns *ns;
        blk_qc_t ret = BLK_QC_T_NONE;
        int srcu_idx;

        /*
         * The namespace might be going away and the bio might
         * be moved to a different queue via blk_steal_bios(),
         * so we need to use the bio_split pool from the original
         * queue to allocate the bvecs from.
         */
        blk_queue_split(q, &bio);

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (likely(ns)) {
                bio->bi_disk = ns->disk;
                bio->bi_opf |= REQ_NVME_MPATH;
                trace_block_bio_remap(bio->bi_disk->queue, bio,
                                      disk_devt(ns->head->disk),
                                      bio->bi_iter.bi_sector);
                ret = direct_make_request(bio);
        } else if (!list_empty_careful(&head->list)) {
                dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

                spin_lock_irq(&head->requeue_lock);
                bio_list_add(&head->requeue_list, bio);
                spin_unlock_irq(&head->requeue_lock);
        } else {
                dev_warn_ratelimited(dev, "no path - failing I/O\n");

                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
        }

        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

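/*
 * Resubmit bios that were parked on the requeue list while no path was
 * available; they go back through the multipath make_request handler so a
 * fresh path is selected.
 */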
static void nvme_requeue_work(struct work_struct *work)
{
        struct nvme_ns_head *head =
                container_of(work, struct nvme_ns_head, requeue_work);
        struct bio *bio, *next;

        spin_lock_irq(&head->requeue_lock);
        next = bio_list_get(&head->requeue_list);
        spin_unlock_irq(&head->requeue_lock);

        while ((bio = next) != NULL) {
                next = bio->bi_next;
                bio->bi_next = NULL;

                /*
                 * Reset disk to the mpath node and resubmit to select a new
                 * path.
                 */
                bio->bi_disk = head->disk;
                generic_make_request(bio);
        }
}

int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
        struct request_queue *q;
        bool vwc = false;

        mutex_init(&head->lock);
        bio_list_init(&head->requeue_list);
        spin_lock_init(&head->requeue_lock);
        INIT_WORK(&head->requeue_work, nvme_requeue_work);

        /*
         * Add a multipath node if the subsystem supports multiple controllers.
         * We also do this for private namespaces as the namespace sharing data
         * could change after a rescan.
         */
        if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
                return 0;

        q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
        if (!q)
                goto out;
        q->queuedata = head;
        blk_queue_make_request(q, nvme_ns_head_make_request);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* set to a default value of 512 until the disk is validated */
        blk_queue_logical_block_size(q, 512);
        blk_set_stacking_limits(&q->limits);

        /* we need to propagate up the VWC settings */
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
        blk_queue_write_cache(q, vwc, vwc);

        head->disk = alloc_disk(0);
        if (!head->disk)
                goto out_cleanup_queue;
        head->disk->fops = &nvme_ns_head_ops;
        head->disk->private_data = head;
        head->disk->queue = q;
        head->disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(head->disk->disk_name, "nvme%dn%d",
                        ctrl->subsys->instance, head->instance);
        return 0;

out_cleanup_queue:
        blk_cleanup_queue(q);
out:
        return -ENOMEM;
}

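/*
 * A path has entered a live ANA state: register the subsystem-level disk if
 * this is the first live path, pre-populate the per-node path cache when the
 * new path is optimized, and kick the requeue work so parked bios get moving
 * again.
 */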
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
        struct nvme_ns_head *head = ns->head;

        lockdep_assert_held(&ns->head->lock);

        if (!head->disk)
                return;

        if (!(head->disk->flags & GENHD_FL_UP))
                device_add_disk(&head->subsys->dev, head->disk,
                                nvme_ns_id_attr_groups);

        if (nvme_path_is_optimized(ns)) {
                int node, srcu_idx;

                srcu_idx = srcu_read_lock(&head->srcu);
                for_each_node(node)
                        __nvme_find_path(head, node);
                srcu_read_unlock(&head->srcu, srcu_idx);
        }

        kblockd_schedule_work(&ns->head->requeue_work);
}

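/*
 * Walk the ANA log buffer: a response header followed by one group descriptor
 * per ANA group, each trailed by a variable-size list of NSIDs.  The callback
 * is invoked for every descriptor after basic sanity and bounds checks.
 */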
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
                int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
                          void *))
{
        void *base = ctrl->ana_log_buf;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr);
        int error, i;

        lockdep_assert_held(&ctrl->ana_lock);

        for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
                struct nvme_ana_group_desc *desc = base + offset;
                u32 nr_nsids = le32_to_cpu(desc->nnsids);
                size_t nsid_buf_size = nr_nsids * sizeof(__le32);

                if (WARN_ON_ONCE(desc->grpid == 0))
                        return -EINVAL;
                if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
                        return -EINVAL;
                if (WARN_ON_ONCE(desc->state == 0))
                        return -EINVAL;
                if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
                        return -EINVAL;

                offset += sizeof(*desc);
                if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
                        return -EINVAL;

                error = cb(ctrl, desc, data);
                if (error)
                        return error;

                offset += nsid_buf_size;
                if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
                        return -EINVAL;
        }

        return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
        return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
{
        mutex_lock(&ns->head->lock);
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

        if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
        mutex_unlock(&ns->head->lock);
}

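/*
 * nvme_parse_ana_log() callback: count groups in the "change" state and apply
 * the descriptor's state to every namespace whose NSID is listed in it.
 */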
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
                struct nvme_ana_group_desc *desc, void *data)
{
        u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
        unsigned *nr_change_groups = data;
        struct nvme_ns *ns;

        dev_dbg(ctrl->device, "ANA group %d: %s.\n",
                        le32_to_cpu(desc->grpid),
                        nvme_ana_state_names[desc->state]);

        if (desc->state == NVME_ANA_CHANGE)
                (*nr_change_groups)++;

        if (!nr_nsids)
                return 0;

        down_write(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
                        continue;
                nvme_update_ns_ana_state(desc, ns);
                if (++n == nr_nsids)
                        break;
        }
        up_write(&ctrl->namespaces_rwsem);
        WARN_ON_ONCE(n < nr_nsids);
        return 0;
}

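/*
 * Fetch the ANA log page from the controller and apply it through
 * nvme_parse_ana_log().  The ANATT timer is armed while any group is still
 * transitioning and stopped once none are.
 */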
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
        u32 nr_change_groups = 0;
        int error;

        mutex_lock(&ctrl->ana_lock);
        error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
                        groups_only ? NVME_ANA_LOG_RGO : 0,
                        ctrl->ana_log_buf, ctrl->ana_log_size, 0);
        if (error) {
                dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
                goto out_unlock;
        }

        error = nvme_parse_ana_log(ctrl, &nr_change_groups,
                        nvme_update_ana_state);
        if (error)
                goto out_unlock;

        /*
         * In theory we should have an ANATT timer per group as they might
         * enter the change state at different times.  But that is a lot of
         * overhead just to protect against a target that keeps entering new
         * change states while never finishing previous ones.  But we'll
         * still eventually time out once all groups are in change state,
         * so this isn't a big deal.
         *
         * We also double the ANATT value to provide some slack for transports
         * or AEN processing overhead.
         */
        if (nr_change_groups)
                mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
        else
                del_timer_sync(&ctrl->anatt_timer);
out_unlock:
        mutex_unlock(&ctrl->ana_lock);
        return error;
}

static void nvme_ana_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

        nvme_read_ana_log(ctrl, false);
}

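/*
 * The controller exceeded its advertised ANA transition time without all
 * groups leaving the change state, so reset it.
 */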
static void nvme_anatt_timeout(struct timer_list *t)
{
        struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

        dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
        nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
        if (!nvme_ctrl_use_ana(ctrl))
                return;
        del_timer_sync(&ctrl->anatt_timer);
        cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)     \
        struct device_attribute subsys_attr_##_name =   \
                __ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
        [NVME_IOPOLICY_NUMA]    = "numa",
        [NVME_IOPOLICY_RR]      = "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvme_subsystem *subsys =
                container_of(dev, struct nvme_subsystem, dev);

        return sprintf(buf, "%s\n",
                        nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct nvme_subsystem *subsys =
                container_of(dev, struct nvme_subsystem, dev);
        int i;

        for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
                if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
                        WRITE_ONCE(subsys->iopolicy, i);
                        return count;
                }
        }

        return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
                      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

        return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
                struct nvme_ana_group_desc *desc, void *data)
{
        struct nvme_ns *ns = data;

        if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
                nvme_update_ns_ana_state(desc, ns);
                return -ENXIO; /* just break out of the loop */
        }

        return 0;
}

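/*
 * Initialize the ANA state of a newly scanned namespace: look up its group in
 * the cached ANA log when ANA is in use, otherwise treat the single path as
 * optimized and mark it live right away.
 */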
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
        if (nvme_ctrl_use_ana(ns->ctrl)) {
                mutex_lock(&ns->ctrl->ana_lock);
                ns->ana_grpid = le32_to_cpu(id->anagrpid);
                nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
                mutex_unlock(&ns->ctrl->ana_lock);
        } else {
                mutex_lock(&ns->head->lock);
                ns->ana_state = NVME_ANA_OPTIMIZED;
                nvme_mpath_set_live(ns);
                mutex_unlock(&ns->head->lock);
        }
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
        if (!head->disk)
                return;
        if (head->disk->flags & GENHD_FL_UP)
                del_gendisk(head->disk);
        blk_set_queue_dying(head->disk->queue);
        /* make sure all pending bios are cleaned up */
        kblockd_schedule_work(&head->requeue_work);
        flush_work(&head->requeue_work);
        blk_cleanup_queue(head->disk->queue);
        put_disk(head->disk);
}

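/*
 * Read the ANA capabilities from Identify Controller, size and allocate the
 * ANA log buffer, and do an initial groups-only read of the log.
 */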
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
        int error;

        if (!nvme_ctrl_use_ana(ctrl))
                return 0;

        ctrl->anacap = id->anacap;
        ctrl->anatt = id->anatt;
        ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
        ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

        mutex_init(&ctrl->ana_lock);
        timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
        ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
                ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
        ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

        if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
                dev_err(ctrl->device,
                        "ANA log page size (%zd) larger than MDTS (%d).\n",
                        ctrl->ana_log_size,
                        ctrl->max_hw_sectors << SECTOR_SHIFT);
                dev_err(ctrl->device, "disabling ANA support.\n");
                return 0;
        }

        INIT_WORK(&ctrl->ana_work, nvme_ana_work);
        ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
        if (!ctrl->ana_log_buf) {
                error = -ENOMEM;
                goto out;
        }

        error = nvme_read_ana_log(ctrl, true);
        if (error)
                goto out_free_ana_log_buf;
        return 0;
out_free_ana_log_buf:
        kfree(ctrl->ana_log_buf);
        ctrl->ana_log_buf = NULL;
out:
        return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
        kfree(ctrl->ana_log_buf);
        ctrl->ana_log_buf = NULL;
}