/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"
#define MaxSector (~(sector_t)0)

/* Bad block numbers are stored sorted in a single page.
 * 64 bits is used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page *sb_page, *bb_page;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* only relevant while reshaping */
	sector_t sb_start;		/* offset of the super block (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;		/* bit set of 'enum flag_bits' bits. */
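	/*
	 * Illustrative only, not from the original header: given the state
	 * table above, the three valid states can be distinguished with two
	 * test_bit() calls on ->flags:
	 *
	 *	bool faulty  = test_bit(Faulty, &rdev->flags);
	 *	bool in_sync = test_bit(In_sync, &rdev->flags);
	 *
	 * faulty means "not working"; otherwise in_sync separates "fully
	 * working" from "working but not in sync with the array".
	 */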
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t recovery_offset;	/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state;	/* handle for 'state'
						 * sysfs entry */

	struct badblocks {
		int count;		/* count of bad blocks */
		int unacked_exist;	/* there probably are unacknowledged
					 * bad blocks. This is only cleared
					 * when a read discovers none
					 */
		int shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled
					 */
		u64 *page;		/* badblock list */
		sector_t size;		/* in sectors */
	} badblocks;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync. Need a
				 * bitmap-based recovery to get fully in sync
				 */
	Unmerged,		/* device is being added to array and should
				 * be considered for bvec_merge_fn but not
				 * yet for actual IO
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked. The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check. So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
};
#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
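/*
 * Illustrative only, not from the original header: packing and unpacking
 * a bad-block entry with the macros above. An acknowledged 8-sector
 * extent starting at sector 4096 round-trips as:
 *
 *	u64 bb = BB_MAKE(4096ULL, 8, 1);
 *
 *	BB_OFFSET(bb) == 4096	(sector number, from the 54 offset bits)
 *	BB_LEN(bb)    == 8	(length, stored as l-1 in the low 9 bits)
 *	BB_ACK(bb)    == 1	(acknowledged flag, the top bit)
 */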
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
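/*
 * Illustrative usage, not from the original header: before issuing a read,
 * a caller can check whether the range overlaps a known bad block and, if
 * so, learn where the first bad sector lies:
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, sector, nr_sectors, &first_bad, &bad_sectors))
 *		... shorten the read to end at first_bad, or read from
 *		    another device ...
 */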
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
struct md_cluster_info;

struct mddev {
	void				*private;
	struct md_personality		*pers;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_UPDATE_SB_FLAGS (1 | 2 | 4)	/* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */
#define MD_STILL_CLOSED	4	/* If set, then array has not been opened since
				 * md_ioctl checked on it.
				 */
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	int				ready; /* See when safe to pass
						* IO requests down */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;
	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;

	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none". It is set when a
	 * sync operation (i.e. "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started. It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished). It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  a reshape is happening
	 * ERROR:    sync-action interrupted because of an IO error
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9
#define	MD_RECOVERY_ERROR	10
	unsigned long			recovery;
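	/*
	 * Illustrative only, not from the original header: the usual way to
	 * ask the management thread to start a resync/recovery (the same
	 * pattern appears in rdev_dec_pending() below) is to set the NEEDED
	 * bit and wake the thread:
	 *
	 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	 *	md_wakeup_thread(mddev->thread);
	 */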
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */
	int				merge_check_needed; /* at least one
							     * member device
							     * has a
							     * merge_bvec_fn */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct		del_work;	/* used for delayed sysfs removal */

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;	/* for plugging ... */

	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space; /* space available at this offset */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap. It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space; /* space available at
							* default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes; /* Maximum number of nodes in the cluster */
		char			cluster_name[64]; /* Name of the cluster */
	} bitmap_info;
	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio			*flush_bio;
	atomic_t			flush_pending;
	struct work_struct		flush_work;
	struct work_struct		event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
};
static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

extern void mddev_unlock(struct mddev *mddev);
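/*
 * Illustrative usage, not from the original header: reconfiguration paths
 * typically take the lock interruptibly and back out if a signal arrives:
 *
 *	int err = mddev_lock(mddev);
 *
 *	if (err)
 *		return err;
 *	... reconfigure the array ...
 *	mddev_unlock(mddev);
 */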
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	void (*make_request)(struct mddev *mddev, struct bio *bio);
	int (*run)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (struct mddev *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another. The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* mergeable_bvec is used to implement ->merge_bvec_fn */
	int (*mergeable_bvec)(struct mddev *mddev,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec);
};
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}

static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (struct mddev * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}
/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)					\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
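/*
 * Illustrative usage, not from the original header: walking the member
 * devices of an array, e.g. to count those that are in sync:
 *
 *	struct md_rdev *rdev;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 */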
struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
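/*
 * Illustrative module pattern, not from the original header: a personality
 * registers itself on load and unregisters on exit. 'example_personality'
 * stands in for a real, fully populated struct md_personality:
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 *	module_init(example_init);
 *	module_exit(example_exit);
 */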
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern int md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);

static inline int mddev_check_plugged(struct mddev *mddev)
{
	return !!blk_check_plugged(md_unplug, mddev,
				   sizeof(struct blk_plug_cb));
}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}
extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

#endif /* _MD_MD_H */