/*
   md.h : kernel internal structure of the Linux MD driver
	  Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H
#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
/* Bad block numbers are stored sorted in a single page.
 * 64 bits are used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */
	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	sector_t	data_offset;	/* start of data in array */
	sector_t	new_data_offset;/* only relevant while reshaping */
	sector_t	sb_start;	/* offset of the super block (in 512-byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */
	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array:
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;
	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t	recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					  * sysfs entry */
	struct badblocks {
		int	count;		/* count of bad blocks */
		int	unacked_exist;	/* there probably are unacknowledged
					 * bad blocks.  This is only cleared
					 * when a read discovers none
					 */
		int	shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled.
					 */
		u64	*page;		/* badblock list */
		sector_t size;		/* in sectors */
	} badblocks;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync
				 */
	Unmerged,		/* device is being added to array and should
				 * be considered for bvec_merge_fn but not
				 * yet for actual IO
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
};
#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
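
/*
 * Illustrative sketch (not part of this header): packing and unpacking
 * one bad-block entry with the macros above.  A 7-sector extent
 * starting at sector 1000, already acknowledged, round-trips as:
 *
 *	u64 e = BB_MAKE(1000, 7, 1);
 *
 *	BB_OFFSET(e) == 1000	-- start sector (the 54 bits above bit 9)
 *	BB_LEN(e)    == 7	-- stored as l-1, so lengths 1..512 fit in 9 bits
 *	BB_ACK(e)    == 1	-- 'acknowledged': safely recorded in metadata
 */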
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
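
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a read path asking whether a range overlaps a known bad block before
 * issuing IO against the member device.
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, sector, nr_sectors, &first_bad, &bad_sectors)) {
 *		-- overlap: read only up to first_bad, or try another mirror
 *	}
 */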
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
struct md_cluster_info;

struct mddev {
	struct md_personality		*pers;
	struct list_head		disks;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_UPDATE_SB_FLAGS (1 | 2 | 4)	/* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */
#define MD_STILL_CLOSED	4	/* If set, then array has not been opened since
				 * md_ioctl checked on it.
				 */
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	int				ready; /* See when safe to pass
						* IO requests down */
	struct gendisk			*gendisk;
#define	UNTIL_IOCTL	1
	/* Superblock information */
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;
	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;
	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */
	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e. "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */
	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */
	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  a reshape is happening
	 * ERROR:    sync-action interrupted because of an IO error
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9
#define	MD_RECOVERY_ERROR	10
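
	/* Illustrative sketch (not part of this header): the usual way
	 * code asks the management thread to consider starting a
	 * resync/recovery is to set NEEDED and poke the thread:
	 *
	 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	 *	md_wakeup_thread(mddev->thread);
	 */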
	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;
	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */
	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */
	int				merge_check_needed; /* at least one
							     * member device
							     * has a
							     * merge_bvec_fn */
	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */
	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */
	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;	/* for plugging ... */
	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space; /* space available at this offset */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space; /* space available at
						       * default offset */
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			nodes; /* Maximum number of nodes in the cluster */
		char			cluster_name[64]; /* Name of the cluster */
	} bitmap_info;
	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;
	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio			*flush_bio;
	atomic_t			flush_pending;
	struct work_struct		flush_work;
	struct work_struct		event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
};
static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
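
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the usual reconfiguration pattern.  mddev_lock() can fail with
 * -EINTR because it takes reconfig_mutex interruptibly.
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	... reconfigure the array ...
 *	mddev_unlock(mddev);
 */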
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
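
/*
 * Illustrative sketch (not part of this header): resync IO submission
 * paths account the sectors they issue so the sync speed can be
 * measured and throttled, e.g.
 *
 *	md_sync_acct(rdev->bdev, bio_sectors(bio));
 */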
struct md_personality
{
	struct list_head list;
	struct module *owner;
	void (*make_request)(struct mddev *mddev, struct bio *bio);
	int (*run)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (struct mddev *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* mergeable_bvec is used to implement ->merge_bvec_fn */
	int (*mergeable_bvec)(struct mddev *mddev,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec);
};
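
/*
 * Illustrative sketch (hypothetical personality, not part of this
 * header): a RAID personality fills in this ops table and registers it
 * from its module init; the "demo" names below are made up.
 *
 *	static struct md_personality demo_personality = {
 *		.owner		= THIS_MODULE,
 *		.make_request	= demo_make_request,
 *		.run		= demo_run,
 *		.status		= demo_status,
 *		.error_handler	= demo_error,
 *	};
 *
 *	register_md_personality(&demo_personality);
 */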
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}

static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}
static inline char *mdname(struct mddev *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}
/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)				\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
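
/*
 * Illustrative sketch (not part of this header): walking the member
 * devices of an array, e.g. to count how many are in sync.  The caller
 * must keep the list stable (reconfig_mutex, or rcu_read_lock() for
 * the _rcu variant).
 *
 *	struct md_rdev *rdev;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 */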
struct md_thread {
	void			(*run) (struct md_thread *thread);
	wait_queue_head_t	wqueue;
	struct task_struct	*tsk;
	unsigned long		timeout;
};

#define THREAD_WAKEUP  0
static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
					  struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
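
/*
 * Illustrative sketch (hypothetical personality, not part of this
 * header): the management thread is created when the array is run and
 * poked whenever there is work; the "demo" names are made up.
 *
 *	mddev->thread = md_register_thread(demo_daemon, mddev, "demo");
 *	...
 *	md_wakeup_thread(mddev->thread);	-- when new work arrives
 *	...
 *	md_unregister_thread(&mddev->thread);	-- on shutdown
 */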
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern int md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
static inline int mddev_check_plugged(struct mddev *mddev)
{
	return !!blk_check_plugged(md_unplug, mddev,
				   sizeof(struct blk_plug_cb));
}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}
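
/*
 * Illustrative sketch (not part of this header): nr_pending is raised
 * before IO is issued to a member device and dropped via
 * rdev_dec_pending() on completion, so a device that has become Faulty
 * is only handled once its last request has finished.
 *
 *	atomic_inc(&rdev->nr_pending);
 *	... submit bio to rdev->bdev; then, in the completion handler ...
 *	rdev_dec_pending(rdev, mddev);
 */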
extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
#endif /* _MD_MD_H */