/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};
/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;

	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
};
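/*
 * For orientation, the ownership chain implied by the structures above
 * (derived from the members themselves, not quoted from elsewhere):
 *
 *	struct multipath
 *	    ->priority_groups	list of struct priority_group (pg->m points back)
 *	        ->pgpaths	list of struct pgpath (pgpath->pg points back)
 *	            ->path	struct dm_path wrapping the underlying device
 */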
/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};
typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;
		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}
/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);

	kfree(pg);
}
static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;

		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
	}

	return m;
}
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);

	return 0;
}
static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}
static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;

	return bio_details;
}
static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}
static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	lockdep_assert_held(&m->lock);

	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}
static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed) {
					spin_lock_irqsave(&m->lock, flags);
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
					spin_unlock_irqrestore(&m->lock, flags);
				}
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}
/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
do {									\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti));			\
} while (0)

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return dm_noflush_suspending(m->ti);
}
static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}
/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
				BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		return DM_MAPIO_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}
static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{
	if (unlikely(map_context)) {
		/*
		 * non-NULL map_context means caller is still map
		 * method; must undo multipath_clone_and_map()
		 */
		struct dm_mpath_io *mpio = get_mpio(map_context);
		struct pgpath *pgpath = mpio->pgpath;

		if (pgpath && pgpath->pg->ps.type->end_io)
			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
						    &pgpath->path,
						    mpio->nr_bytes,
						    clone->io_start_time_ns);
	}

	blk_put_request(clone);
}
/*
 * Map cloned bios (bio-based multipath)
 */

static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;
	unsigned long flags;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if (!pgpath) {
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			__multipath_queue_bio(m, bio);
			pgpath = ERR_PTR(-EAGAIN);
		}
		spin_unlock_irqrestore(&m->lock, flags);

	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		multipath_queue_bio(m, bio);
		pg_init_all_paths(m);
		return ERR_PTR(-EAGAIN);
	}

	return pgpath;
}
static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (__must_push_back(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}
static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}
static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			submit_bio_noacct(bio);
			break;
		case DM_MAPIO_SUBMITTED:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}
/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value, const char *caller)
{
	unsigned long flags;
	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
	const char *dm_dev_name = dm_table_device_name(m->ti->table);

	DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
		dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);

	spin_lock_irqsave(&m->lock, flags);

	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);

	if (save_old_value) {
		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
			      dm_dev_name);
		} else
			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
	} else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
		/* due to "fail_if_no_path" message, need to honor it. */
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);

	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
		dm_dev_name, __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		dm_noflush_suspending(m->ti));

	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}
/*
 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 * process any queued I/O.
 */
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
	struct multipath *m = from_timer(m, t, nopath_timer);

	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
	       dm_table_device_name(m->ti->table));
	queue_if_no_path(m, false, false, __func__);
}

/*
 * Enable the queue_if_no_path timeout if necessary.
 * Called with m->lock held.
 */
static void enable_nopath_timeout(struct multipath *m)
{
	unsigned long queue_if_no_path_timeout =
		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;

	lockdep_assert_held(&m->lock);

	if (queue_if_no_path_timeout > 0 &&
	    atomic_read(&m->nr_valid_paths) == 0 &&
	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		mod_timer(&m->nopath_timer,
			  jiffies + queue_if_no_path_timeout);
	}
}

static void disable_nopath_timeout(struct multipath *m)
{
	del_timer_sync(&m->nopath_timer);
}
/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}
/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
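/*
 * Illustrative only (not quoted from this file): a constructor line in the
 * format above for a hypothetical two-group map, using the round-robin
 * selector with one per-path selector arg (repeat_count) per path:
 *
 *	0 2097152 multipath 1 queue_if_no_path 0 2 1 \
 *	    round-robin 0 2 1 8:16 1000 8:32 1000 \
 *	    round-robin 0 1 1 8:48 1000
 */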
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}
*bdev
, struct multipath
*m
,
871 const char **attached_handler_name
, char **error
)
873 struct request_queue
*q
= bdev_get_queue(bdev
);
876 if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER
, m
)) {
878 if (*attached_handler_name
) {
880 * Clear any hw_handler_params associated with a
881 * handler that isn't already attached.
883 if (m
->hw_handler_name
&& strcmp(*attached_handler_name
, m
->hw_handler_name
)) {
884 kfree(m
->hw_handler_params
);
885 m
->hw_handler_params
= NULL
;
889 * Reset hw_handler_name to match the attached handler
891 * NB. This modifies the table line to show the actual
892 * handler instead of the original table passed in.
894 kfree(m
->hw_handler_name
);
895 m
->hw_handler_name
= *attached_handler_name
;
896 *attached_handler_name
= NULL
;
900 if (m
->hw_handler_name
) {
901 r
= scsi_dh_attach(q
, m
->hw_handler_name
);
903 char b
[BDEVNAME_SIZE
];
905 printk(KERN_INFO
"dm-mpath: retaining handler on device %s\n",
910 *error
= "error attaching hardware handler";
914 if (m
->hw_handler_params
) {
915 r
= scsi_dh_set_params(q
, m
->hw_handler_params
);
917 *error
= "unable to set hardware handler parameters";
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q;
	const char *attached_handler_name = NULL;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	q = bdev_get_queue(p->path.dev->bdev);
	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (attached_handler_name || m->hw_handler_name) {
		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
		kfree(attached_handler_name);
		if (r) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	int ret;
	unsigned hw_argc;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq") ||
				 !strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	unsigned long flags;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	spin_lock_irqsave(&m->lock, flags);
	enable_nopath_timeout(m);
	spin_unlock_irqrestore(&m->lock, flags);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}
*m
)
1275 prepare_to_wait(&m
->pg_init_wait
, &wait
, TASK_UNINTERRUPTIBLE
);
1277 if (!atomic_read(&m
->pg_init_in_progress
))
1282 finish_wait(&m
->pg_init_wait
, &wait
);
1285 static void flush_multipath_work(struct multipath
*m
)
1287 if (m
->hw_handler_name
) {
1288 unsigned long flags
;
1290 if (!atomic_read(&m
->pg_init_in_progress
))
1293 spin_lock_irqsave(&m
->lock
, flags
);
1294 if (atomic_read(&m
->pg_init_in_progress
) &&
1295 !test_and_set_bit(MPATHF_PG_INIT_DISABLED
, &m
->flags
)) {
1296 spin_unlock_irqrestore(&m
->lock
, flags
);
1298 flush_workqueue(kmpath_handlerd
);
1299 multipath_wait_for_pg_init_completion(m
);
1301 spin_lock_irqsave(&m
->lock
, flags
);
1302 clear_bit(MPATHF_PG_INIT_DISABLED
, &m
->flags
);
1304 spin_unlock_irqrestore(&m
->lock
, flags
);
1307 if (m
->queue_mode
== DM_TYPE_BIO_BASED
)
1308 flush_work(&m
->process_queued_bios
);
1309 flush_work(&m
->trigger_event
);
static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	disable_nopath_timeout(m);
	flush_multipath_work(m);
	free_multipath(m);
}
/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("%s: Failing path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	schedule_work(&m->trigger_event);

	enable_nopath_timeout(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("%s: Reinstating path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	if (pgpath->is_active)
		disable_nopath_timeout(m);

	return r;
}
/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}
/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}
/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}
/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}
/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		fallthrough;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still on going */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	if (error && blk_path_error(error)) {
		struct multipath *m = ti->private;

		if (error == BLK_STS_RESOURCE)
			r = DM_ENDIO_DELAY_REQUEUE;
		else
			r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (!atomic_read(&m->nr_valid_paths) &&
		    !must_push_back_rq(m)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 clone->io_start_time_ns);
	}

	return r;
}
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || !blk_path_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (__must_push_back(m)) {
				r = DM_ENDIO_REQUEUE;
			} else {
				dm_report_EIO(m);
				*error = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&m->lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	multipath_queue_bio(m, clone);
	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 dm_start_time_ns_from_clone(clone));
	}

	return r;
}
/*
 * Suspend with flush can't complete until all the I/O is processed
 * so if the last path fails we must error any remaining I/O.
 * - Note that if the freeze_bdev fails while suspending, the
 *   queue_if_no_path state is lost - userspace should reset it.
 * Otherwise, during noflush suspend, queue_if_no_path will not change.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
		queue_if_no_path(m, false, true, __func__);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}
/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}

	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
		dm_table_device_name(m->ti->table), __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));

	spin_unlock_irqrestore(&m->lock, flags);
}
/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
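/*
 * These are the strings reported to "dmsetup table" (STATUSTYPE_TABLE) and
 * "dmsetup status" (STATUSTYPE_INFO) for a multipath device; every field is
 * produced by a DMEMIT() call in multipath_status() below.  For the
 * hypothetical two-group example near the constructor comment, the table
 * output reproduces that constructor string, while the info output swaps in
 * runtime state: the queue_io flag and pg_init count, per-group A/D/E state,
 * and per-path A/F state plus fail counts.
 */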
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch(m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
			     char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;
	unsigned long flags;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			spin_lock_irqsave(&m->lock, flags);
			enable_nopath_timeout(m);
			spin_unlock_irqrestore(&m->lock, flags);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false, __func__);
			disable_nopath_timeout(m);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
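/*
 * The messages handled above are normally issued by multipath-tools, but for
 * testing they can be sent by hand, e.g. (device name and path number are
 * illustrative, not taken from this file):
 *
 *	dmsetup message mpatha 0 fail_path 8:16
 *	dmsetup message mpatha 0 reinstate_path 8:16
 *	dmsetup message mpatha 0 switch_group 2
 *	dmsetup message mpatha 0 queue_if_no_path
 */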
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev)
{
	struct multipath *m = ti->private;
	struct pgpath *pgpath;
	unsigned long flags;
	int r;

	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, 0);

	if (pgpath) {
		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
			*bdev = pgpath->path.dev->bdev;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		r = -EIO;
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		spin_unlock_irqrestore(&m->lock, flags);
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			(void) __pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}
/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths)) {
		unsigned long flags;
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);
			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 14, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
		    DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
static int __init dm_multipath_init(void)
{
	int r;

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	return 0;

bad_register_target:
	destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	return r;
}
static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
module_param_named(queue_if_no_path_timeout_secs,
		   queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
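/*
 * module_param_named() also exposes this value through sysfs, so with sysfs
 * mounted in the usual place it can typically be changed at runtime with:
 *
 *	echo 90 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 *
 * A value of 0 (the default above) leaves the timer disarmed; see
 * enable_nopath_timeout().
 */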
MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");