drivers/md/dm-raid.c
/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX		"raid"
#define MAX_RAID_DEVICES	253 /* md-raid kernel limit */

/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

/*
 * Minimum journal space 4 MiB in sectors.
 */
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

/* Global list of all raid sets */
static LIST_HEAD(raid_sets);
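
/*
 * When true, all component devices are assumed to reliably return zeroes
 * on reads from discarded regions, which is what raid4/5/6 discard
 * support requires (matches the semantics of the corresponding dm-raid
 * module parameter).
 */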
static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC			0  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC		1  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD		2  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP		3  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE	4  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE	5  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND	6  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY	7  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE		8  /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE		9  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES	10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT	11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS		12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET		13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */

/* New for v1.10.0 */
#define __CTR_FLAG_JOURNAL_DEV		15 /* 2 */ /* Only with raid4/5/6 (journal device)! */

/* New for v1.11.1 */
#define __CTR_FLAG_JOURNAL_MODE		16 /* 2 */ /* Only with raid4/5/6 (journal mode)! */

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV		(1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE		(1 << __CTR_FLAG_JOURNAL_MODE)

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */
/* Define all sync flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET)

/* Valid options definitions per raid level... */

/* "raid0" only accepts data_offset */
#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache, data_offset, delta_disks or any raid10 options */
#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)
/* ...valid options definitions per raid level */

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_RS_SUSPENDED		5
#define RT_FLAG_RS_IN_SYNC		6
#define RT_FLAG_RS_RESYNCING		7

/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
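
/*
 * With MAX_RAID_DEVICES == 253 the above works out to
 * (253 + 63) / 8 / 8 == 4 uint64_t words, i.e. one bit per possible
 * raid device, rounded up to whole 64-bit words.
 */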
/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;
	struct list_head list;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	/* Optional raid4/5/6 journal device */
	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[0];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

/* Find any raid_set in active slot for @rs on global list */
static struct raid_set *rs_find_active(struct raid_set *rs)
{
	struct raid_set *r;
	struct mapped_device *md = dm_table_get_md(rs->ti->table);

	list_for_each_entry(r, &raid_sets, list)
		if (r != rs && dm_table_get_md(r->ti->table) == md)
			return r;

	return NULL;
}

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned int parity_devs;	/* # of parity devices. */
	const unsigned int minimal_devs;/* minimal # of devices in set. */
	const unsigned int level;	/* RAID level. */
	const unsigned int algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",	  "raid0 (striping)",			    0, 2, 0,  0 /* NONE */},
	{"raid1",	  "raid1 (mirroring)",			    0, 2, 1,  0 /* NONE */},
	{"raid10_far",	  "raid10 far (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near",	  "raid10 near (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10",	  "raid10 (striped mirrors)",		    0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4",	  "raid4 (dedicated first parity disk)",    1, 2, 5,  ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
	{"raid5_n",	  "raid5 (dedicated last parity disk)",	    1, 2, 5,  ALGORITHM_PARITY_N},
	{"raid5_ls",	  "raid5 (left symmetric)",		    1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs",	  "raid5 (right symmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la",	  "raid5 (left asymmetric)",		    1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra",	  "raid5 (right asymmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr",	  "raid6 (zero restart)",		    2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr",	  "raid6 (N restart)",			    2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc",	  "raid6 (N continue)",			    2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6",	  "raid6 (dedicated parity/Q n/6)",	    2, 4, 6,  ALGORITHM_PARITY_N_6},
	{"raid6_ls_6",	  "raid6 (left symmetric dedicated Q 6)",   2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6",	  "raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6",	  "raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6",	  "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

/* Define correlation of raid456 journal cache modes and dm-raid target line parameters */
static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
};

/* Return MD raid4/5/6 journal mode for dm @mode string */
static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

/* Return dm-raid raid4/5/6 journal mode string for @mode */
static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}
/*
 * Bool helpers to test for various raid levels of a raid set.
 * Its level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 6 */
static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

/*
 * bool helpers to test for various raid levels of a raid type @rt
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

/* Return true if md raid10 near for @layout */
static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

/* Return md raid10 algorithm for @name */
static const int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

/* Return md raid10 format id for @format string */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> disable it for now in the definition above
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		r = !RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
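
/*
 * Worked examples for the layout word built above (values follow directly
 * from the helpers and bit definitions):
 *
 *   "near" with 2 copies:   (1 << 8) | 2                       = 0x102
 *   "far" with 2 copies:    RAID10_USE_FAR_SETS | (2 << 8) | 1 = 0x40201
 *   "offset" with 2 copies: RAID10_USE_FAR_SETS | RAID10_OFFSET | (2 << 8) | 1
 *
 * (assuming 'raid10_use_near_sets' was not requested for far/offset)
 */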
/* END: MD raid10 bit definitions and helpers */

/* Check for any of the raid10 algorithms */
static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type for @name derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/* Adjust rdev sectors */
static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	/*
	 * raid10 sets rdev->sector to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

/*
 * Change bdev capacity of @rs in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity(gendisk, rs->md.array_sectors);
	revalidate_disk(gendisk);
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	INIT_LIST_HEAD(&rs->list);
	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/* Add @rs to global list. */
	list_add(&rs->list, &raid_sets);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

/* Free all @rs allocations and remove it from global list. */
static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	list_del(&rs->list);

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Pull off the number of raid devices argument to get to dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets initially.
		 * Out of place reshape will set them accordingly.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}
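
/*
 * Example for the default path above: with ti->len == 1 << 30 sectors
 * (512 GiB), min_region_size == 1 << 9, which is below 1 << 13, so the
 * 4 MiB default (8192 sectors) is chosen; larger targets instead round
 * min_region_size up to the next power of 2.
 */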
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < rs->md.raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				    !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			    !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *					(i.e. write hole closing log)
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
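
/*
 * Illustrative ctr line combining the device and parameter sections
 * (in the spirit of the example in the dm-raid documentation; the
 * device numbers are made up):
 *
 *   0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 *
 * i.e. a raid4 set of 5 devices without metadata devices, with one raid
 * parameter (a <chunk_size> of 2048 sectors).
 */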
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/*
		 * Parameters that take a string value are checked here.
		 */
		/* "raid10_format {near|offset|far}" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		/* "journal_dev <dev>" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		/* "journal_mode <mode>" ("journal_dev" mandatory!) */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/*
		 * Parameters with number values from here on.
		 */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (value < 0) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}
			/* Ensure sensible data offset */
			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}
			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}
			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}
/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting number of stripes in raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

/* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Return # of data stripes of @rs (i.e. as of ctr) */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

/*
 * Retrieve rdev->sectors from any valid raid device of @rs
 * to allow userspace to pass in arbitrary "- -" device tuples.
 */
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}

/* Check that calculated dev_sectors fits all component devices. */
static int _check_data_dev_sectors(struct raid_set *rs)
{
	sector_t ds = ~0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
			ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
			if (ds < rs->md.dev_sectors) {
				rs->ti->error = "Component device(s) too small";
				return -EINVAL;
			}
		}

	return 0;
}

/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	/* Special raid1 case w/o delta_disks support (yet) */
	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * dev_sectors;
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		/* Striped layouts */
		array_sectors = (data_stripes + delta_disks) * dev_sectors;

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = dev_sectors;

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;

	return _check_data_dev_sectors(rs);
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}
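
/*
 * Worked example for the striped path above: a raid6 set of 5 devices
 * has 3 data stripes, so ti->len == 3000 sectors gives
 * dev_sectors == 3000 / 3 == 1000 and, with no delta_disks,
 * array_sectors == (3 + 0) * 1000 == 3000; any division remainder
 * is rejected via the 'bad' label.
 */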
/* Setup recovery on @rs */
static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either
	 * completely or for the grown part to
	 * ensure proper parity and Q-Syndrome
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/*
	 * Other raid set types may skip recovery
	 * depending on the 'nosync' flag.
	 */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	if (!dev_sectors)
		/* New raid set or 'sync' flag provided */
		__rs_setup_recovery(rs, 0);
	else if (dev_sectors == MaxSector)
		/* Prevent recovery */
		__rs_setup_recovery(rs, MaxSector);
	else if (__rdev_sectors(rs) < dev_sectors)
		/* Grown raid set */
		__rs_setup_recovery(rs, __rdev_sectors(rs));
	else
		__rs_setup_recovery(rs, MaxSector);
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access the most recent mddev properties */
	if (!rs_is_reshaping(rs)) {
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}
	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}

/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 */
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisible by data copies! */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 -> raid1/5 with 2 disks */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		      __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		      __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}
1912 /* True if @rs requested to be taken over */
1913 static bool rs_takeover_requested(struct raid_set *rs)
1915 return rs->md.new_level != rs->md.level;
1918 /* True if @rs is requested to reshape by ctr */
1919 static bool rs_reshape_requested(struct raid_set *rs)
1921 bool change;
1922 struct mddev *mddev = &rs->md;
1924 if (rs_takeover_requested(rs))
1925 return false;
1927 if (rs_is_raid0(rs))
1928 return false;
1930 change = mddev->new_layout != mddev->layout ||
1931 mddev->new_chunk_sectors != mddev->chunk_sectors ||
1932 rs->delta_disks;
1934 /* Historical case to support raid1 reshape without delta disks */
1935 if (rs_is_raid1(rs)) {
1936 if (rs->delta_disks)
1937 return !!rs->delta_disks;
1939 return !change &&
1940 mddev->raid_disks != rs->raid_disks;
1943 if (rs_is_raid10(rs))
1944 return change &&
1945 !__is_raid10_far(mddev->new_layout) &&
1946 rs->delta_disks >= 0;
1948 return change;
1949 }
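/*
 * In ctr terms (illustrative): changing the level string, e.g.
 * raid1 -> raid5_n, is a takeover and makes this function return false
 * in favour of the takeover path; keeping the level but changing the
 * chunk size, the layout (raid5_ls -> raid5_rs) or the disk count via
 * delta_disks is a reshape and makes it return true.
 */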
1951 /* Features */
1952 #define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */
1954 /* State flags for sb->flags */
1955 #define SB_FLAG_RESHAPE_ACTIVE 0x1
1956 #define SB_FLAG_RESHAPE_BACKWARDS 0x2
1959 * This structure is never routinely used by userspace, unlike md superblocks.
1960 * Devices with this superblock should only ever be accessed via device-mapper.
1962 #define DM_RAID_MAGIC 0x64526D44
1963 struct dm_raid_superblock {
1964 __le32 magic; /* "DmRd" */
1965 __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */
1967 __le32 num_devices; /* Number of devices in this raid set. (Max 64) */
1968 __le32 array_position; /* The position of this drive in the raid set */
1970 __le64 events; /* Incremented by md when superblock updated */
1971 __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */
1972 /* indicate failures (see extension below) */
1975 * This offset tracks the progress of the repair or replacement of
1976 * an individual drive.
1978 __le64 disk_recovery_offset;
1981 * This offset tracks the progress of the initial raid set
1982 * synchronisation/parity calculation.
1984 __le64 array_resync_offset;
1987 * raid characteristics
1989 __le32 level;
1990 __le32 layout;
1991 __le32 stripe_sectors;
1993 /********************************************************************
1994 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
1996 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
1999 __le32 flags; /* Flags defining array states for reshaping */
2002 * This offset tracks the progress of a raid
2003 * set reshape in order to be able to restart it
2005 __le64 reshape_position;
2008 * These define the properties of the array in case of an interrupted reshape
2010 __le32 new_level;
2011 __le32 new_layout;
2012 __le32 new_stripe_sectors;
2013 __le32 delta_disks;
2015 __le64 array_sectors; /* Array size in sectors */
2018 * Sector offsets to data on devices (reshaping).
2019 * Needed to support out of place reshaping, thus
2020 * not writing over any stripes whilst converting
2021 * them from old to new layout
2023 __le64 data_offset;
2024 __le64 new_data_offset;
2026 __le64 sectors; /* Used device size in sectors */
2029 * Additional bit field of devices indicating failures to support
2030 * up to 256 devices with the 1.9.0 on-disk metadata format
2032 __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
2034 __le32 incompat_features; /* Used to indicate any incompatible features */
2036 /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
2037 } __packed;
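/*
 * All on-disk members above are little endian; a minimal access sketch
 * (field names as defined above, not a complete reader):
 *
 *	struct dm_raid_superblock *sb = page_address(rdev->sb_page);
 *
 *	if (sb->magic != cpu_to_le32(DM_RAID_MAGIC))
 *		return -EINVAL;
 *	sb->events = cpu_to_le64(mddev->events);
 *	mddev->level = le32_to_cpu(sb->level);
 */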
2040 * Check for reshape constraints on raid set @rs:
2042 * - reshape function non-existent
2043 * - degraded set
2044 * - ongoing recovery
2045 * - ongoing reshape
2047 * Returns 0 if no constraint applies, or -EPERM with an error
2048 * message reference in @rs->ti->error otherwise.
2050 static int rs_check_reshape(struct raid_set *rs)
2052 struct mddev *mddev = &rs->md;
2054 if (!mddev->pers || !mddev->pers->check_reshape)
2055 rs->ti->error = "Reshape not supported";
2056 else if (mddev->degraded)
2057 rs->ti->error = "Can't reshape degraded raid set";
2058 else if (rs_is_recovering(rs))
2059 rs->ti->error = "Convert request on recovering raid set prohibited";
2060 else if (rs_is_reshaping(rs))
2061 rs->ti->error = "raid set already reshaping!";
2062 else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
2063 rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
2064 else
2065 return 0;
2067 return -EPERM;
2070 static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
2072 BUG_ON(!rdev->sb_page);
2074 if (rdev->sb_loaded && !force_reload)
2075 return 0;
2077 rdev->sb_loaded = 0;
2079 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
2080 DMERR("Failed to read superblock of device at position %d",
2081 rdev->raid_disk);
2082 md_error(rdev->mddev, rdev);
2083 set_bit(Faulty, &rdev->flags);
2084 return -EIO;
2087 rdev->sb_loaded = 1;
2089 return 0;
2092 static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
2094 failed_devices[0] = le64_to_cpu(sb->failed_devices);
2095 memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));
2097 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2098 int i = ARRAY_SIZE(sb->extended_failed_devices);
2100 while (i--)
2101 failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
2105 static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
2107 int i = ARRAY_SIZE(sb->extended_failed_devices);
2109 sb->failed_devices = cpu_to_le64(failed_devices[0]);
2110 while (i--)
2111 sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
2112 }
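/*
 * Bitfield layout handled by the two helpers above (illustrative):
 * failed_devices[0] mirrors sb->failed_devices and covers raid disks
 * 0..63; failed_devices[i] for i >= 1 maps to
 * sb->extended_failed_devices[i - 1] and covers disks 64*i..64*i + 63.
 * E.g. marking raid disk 70 as failed sets bit 6 of
 * sb->extended_failed_devices[0].
 */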
2115 * Synchronize the superblock members with the raid set properties
2117 * All superblock data is little endian.
2119 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
2121 bool update_failed_devices = false;
2122 unsigned int i;
2123 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2124 struct dm_raid_superblock *sb;
2125 struct raid_set *rs = container_of(mddev, struct raid_set, md);
2127 /* No metadata device, no superblock */
2128 if (!rdev->meta_bdev)
2129 return;
2131 BUG_ON(!rdev->sb_page);
2133 sb = page_address(rdev->sb_page);
2135 sb_retrieve_failed_devices(sb, failed_devices);
2137 for (i = 0; i < rs->raid_disks; i++)
2138 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
2139 update_failed_devices = true;
2140 set_bit(i, (void *) failed_devices);
2143 if (update_failed_devices)
2144 sb_update_failed_devices(sb, failed_devices);
2146 sb->magic = cpu_to_le32(DM_RAID_MAGIC);
2147 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
2149 sb->num_devices = cpu_to_le32(mddev->raid_disks);
2150 sb->array_position = cpu_to_le32(rdev->raid_disk);
2152 sb->events = cpu_to_le64(mddev->events);
2154 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
2155 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
2157 sb->level = cpu_to_le32(mddev->level);
2158 sb->layout = cpu_to_le32(mddev->layout);
2159 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
2161 /********************************************************************
2162 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
2164 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
2166 sb->new_level = cpu_to_le32(mddev->new_level);
2167 sb->new_layout = cpu_to_le32(mddev->new_layout);
2168 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
2170 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2172 smp_rmb(); /* Make sure we access most recent reshape position */
2173 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2174 if (le64_to_cpu(sb->reshape_position) != MaxSector) {
2175 /* Flag ongoing reshape */
2176 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);
2178 if (mddev->delta_disks < 0 || mddev->reshape_backwards)
2179 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
2180 } else {
2181 /* Clear reshape flags */
2182 sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
2185 sb->array_sectors = cpu_to_le64(mddev->array_sectors);
2186 sb->data_offset = cpu_to_le64(rdev->data_offset);
2187 sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
2188 sb->sectors = cpu_to_le64(rdev->sectors);
2189 sb->incompat_features = cpu_to_le32(0);
2191 /* Zero out the rest of the payload after the size of the superblock */
2192 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
2196 * super_load
2198 * This function creates a superblock if one is not found on the device
2199 * and will decide which superblock to use if there's a choice.
2201 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
2203 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
2205 int r;
2206 struct dm_raid_superblock *sb;
2207 struct dm_raid_superblock *refsb;
2208 uint64_t events_sb, events_refsb;
2210 r = read_disk_sb(rdev, rdev->sb_size, false);
2211 if (r)
2212 return r;
2214 sb = page_address(rdev->sb_page);
2217 * Two cases that we want to write new superblocks and rebuild:
2218 * 1) New device (no matching magic number)
2219 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
2221 if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
2222 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
2223 super_sync(rdev->mddev, rdev);
2225 set_bit(FirstUse, &rdev->flags);
2226 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
2228 /* Force writing of superblocks to disk */
2229 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
2231 /* Any superblock is better than none, choose that if given */
2232 return refdev ? 0 : 1;
2235 if (!refdev)
2236 return 1;
2238 events_sb = le64_to_cpu(sb->events);
2240 refsb = page_address(refdev->sb_page);
2241 events_refsb = le64_to_cpu(refsb->events);
2243 return (events_sb > events_refsb) ? 1 : 0;
2244 }
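/*
 * Freshest-device selection example (illustrative): given a candidate
 * with sb->events == 42 and a refdev at 41, super_load() returns 1 and
 * analyse_superblocks() below promotes the candidate to "freshest", so
 * all remaining members are validated against the newest superblock.
 */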
2246 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2248 int role;
2249 unsigned int d;
2250 struct mddev *mddev = &rs->md;
2251 uint64_t events_sb;
2252 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2253 struct dm_raid_superblock *sb;
2254 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
2255 struct md_rdev *r;
2256 struct dm_raid_superblock *sb2;
2258 sb = page_address(rdev->sb_page);
2259 events_sb = le64_to_cpu(sb->events);
2262 * Initialise to 1 if this is a new superblock.
2264 mddev->events = events_sb ? : 1;
2266 mddev->reshape_position = MaxSector;
2268 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2269 mddev->level = le32_to_cpu(sb->level);
2270 mddev->layout = le32_to_cpu(sb->layout);
2271 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2274 * Reshaping is supported, i.e. reshape_position is valid
2275 * in superblock and superblock content is authoritative.
2277 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2278 /* Superblock is authoritative wrt given raid set layout! */
2279 mddev->new_level = le32_to_cpu(sb->new_level);
2280 mddev->new_layout = le32_to_cpu(sb->new_layout);
2281 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2282 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2283 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2285 /* raid was reshaping and got interrupted */
2286 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
2287 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
2288 DMERR("Reshape requested but raid set is still reshaping");
2289 return -EINVAL;
2292 if (mddev->delta_disks < 0 ||
2293 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2294 mddev->reshape_backwards = 1;
2295 else
2296 mddev->reshape_backwards = 0;
2298 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2299 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2302 } else {
2304 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
2306 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2307 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2309 if (rs_takeover_requested(rs)) {
2310 if (rt_cur && rt_new)
2311 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
2312 rt_cur->name, rt_new->name);
2313 else
2314 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
2315 return -EINVAL;
2316 } else if (rs_reshape_requested(rs)) {
2317 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
2318 if (mddev->layout != mddev->new_layout) {
2319 if (rt_cur && rt_new)
2320 DMERR(" current layout %s vs new layout %s",
2321 rt_cur->name, rt_new->name);
2322 else
2323 DMERR(" current layout 0x%X vs new layout 0x%X",
2324 le32_to_cpu(sb->layout), mddev->new_layout);
2326 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2327 DMERR(" current stripe sectors %u vs new stripe sectors %u",
2328 mddev->chunk_sectors, mddev->new_chunk_sectors);
2329 if (rs->delta_disks)
2330 DMERR(" current %u disks vs new %u disks",
2331 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2332 if (rs_is_raid10(rs)) {
2333 DMERR(" Old layout: %s w/ %u copies",
2334 raid10_md_layout_to_format(mddev->layout),
2335 raid10_md_layout_to_copies(mddev->layout));
2336 DMERR(" New layout: %s w/ %u copies",
2337 raid10_md_layout_to_format(mddev->new_layout),
2338 raid10_md_layout_to_copies(mddev->new_layout));
2340 return -EINVAL;
2343 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2346 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2347 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2350 * During load, we set FirstUse if a new superblock was written.
2351 * There are three reasons we might not have a superblock:
2352 * 1) The raid set is brand new - in which case, all of the
2353 * devices must have their In_sync bit set. Also,
2354 * recovery_cp must be 0, unless forced.
2355 * 2) This is a new device being added to an old raid set
2356 * and the new device needs to be rebuilt - in which
2357 * case the In_sync bit will /not/ be set and
2358 * recovery_cp must be MaxSector.
2359 * 3) This is/are a new device(s) being added to an old
2360 * raid set during takeover to a higher raid level
2361 * to provide capacity for redundancy or during reshape
2362 * to add capacity to grow the raid set.
2364 d = 0;
2365 rdev_for_each(r, mddev) {
2366 if (test_bit(Journal, &rdev->flags))
2367 continue;
2369 if (test_bit(FirstUse, &r->flags))
2370 new_devs++;
2372 if (!test_bit(In_sync, &r->flags)) {
2373 DMINFO("Device %d specified for rebuild; clearing superblock",
2374 r->raid_disk);
2375 rebuilds++;
2377 if (test_bit(FirstUse, &r->flags))
2378 rebuild_and_new++;
2381 d++;
2384 if (new_devs == rs->raid_disks || !rebuilds) {
2385 /* Replace a broken device */
2386 if (new_devs == 1 && !rs->delta_disks)
2388 if (new_devs == rs->raid_disks) {
2389 DMINFO("Superblocks created for new raid set");
2390 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2391 } else if (new_devs != rebuilds &&
2392 new_devs != rs->delta_disks) {
2393 DMERR("New device injected into existing raid set without "
2394 "'delta_disks' or 'rebuild' parameter specified");
2395 return -EINVAL;
2397 } else if (new_devs && new_devs != rebuilds) {
2398 DMERR("%u 'rebuild' devices cannot be injected into"
2399 " a raid set with %u other first-time devices",
2400 rebuilds, new_devs);
2401 return -EINVAL;
2402 } else if (rebuilds) {
2403 if (rebuild_and_new && rebuilds != rebuild_and_new) {
2404 DMERR("new device%s provided without 'rebuild'",
2405 new_devs > 1 ? "s" : "");
2406 return -EINVAL;
2407 } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
2408 DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2409 (unsigned long long) mddev->recovery_cp);
2410 return -EINVAL;
2411 } else if (rs_is_reshaping(rs)) {
2412 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2413 (unsigned long long) mddev->reshape_position);
2414 return -EINVAL;
2419 * Now we set the Faulty bit for those devices that are
2420 * recorded in the superblock as failed.
2422 sb_retrieve_failed_devices(sb, failed_devices);
2423 rdev_for_each(r, mddev) {
2424 if (test_bit(Journal, &rdev->flags) ||
2425 !r->sb_page)
2426 continue;
2427 sb2 = page_address(r->sb_page);
2428 sb2->failed_devices = 0;
2429 memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2432 * Check for any device re-ordering.
2434 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2435 role = le32_to_cpu(sb2->array_position);
2436 if (role < 0)
2437 continue;
2439 if (role != r->raid_disk) {
2440 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2441 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2442 rs->raid_disks % rs->raid10_copies) {
2443 rs->ti->error =
2444 "Cannot change raid10 near set to odd # of devices!";
2445 return -EINVAL;
2448 sb2->array_position = cpu_to_le32(r->raid_disk);
2450 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2451 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2452 !rt_is_raid1(rs->raid_type)) {
2453 rs->ti->error = "Cannot change device positions in raid set";
2454 return -EINVAL;
2457 DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2461 * Partial recovery is performed on
2462 * returning failed devices.
2464 if (test_bit(role, (void *) failed_devices))
2465 set_bit(Faulty, &r->flags);
2469 return 0;
2472 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2474 struct mddev *mddev = &rs->md;
2475 struct dm_raid_superblock *sb;
2477 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2478 return 0;
2480 sb = page_address(rdev->sb_page);
2483 * If mddev->events is not set, we know we have not yet initialized
2484 * the array.
2486 if (!mddev->events && super_init_validation(rs, rdev))
2487 return -EINVAL;
2489 if (le32_to_cpu(sb->compat_features) &&
2490 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2491 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2492 return -EINVAL;
2495 if (sb->incompat_features) {
2496 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
2497 return -EINVAL;
2500 /* Enable bitmap creation for RAID levels != 0 */
2501 mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
2502 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2504 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2506 * Retrieve rdev size stored in superblock to be prepared for shrink.
2507 * Check extended superblock members are present otherwise the size
2508 * will not be set!
2510 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
2511 rdev->sectors = le64_to_cpu(sb->sectors);
2513 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2514 if (rdev->recovery_offset == MaxSector)
2515 set_bit(In_sync, &rdev->flags);
2517 * If no reshape in progress -> we're recovering single
2518 * disk(s) and have to set the device(s) to out-of-sync
2520 else if (!rs_is_reshaping(rs))
2521 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
2525 * If a device comes back, set it as not In_sync and no longer faulty.
2527 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2528 rdev->recovery_offset = 0;
2529 clear_bit(In_sync, &rdev->flags);
2530 rdev->saved_raid_disk = rdev->raid_disk;
2533 /* Reshape support -> restore respective data offsets */
2534 rdev->data_offset = le64_to_cpu(sb->data_offset);
2535 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
2537 return 0;
2541 * Analyse superblocks and select the freshest.
2543 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2545 int r;
2546 struct md_rdev *rdev, *freshest;
2547 struct mddev *mddev = &rs->md;
2549 freshest = NULL;
2550 rdev_for_each(rdev, mddev) {
2551 if (test_bit(Journal, &rdev->flags))
2552 continue;
2554 if (!rdev->meta_bdev)
2555 continue;
2557 /* Set superblock offset/size for metadata device. */
2558 rdev->sb_start = 0;
2559 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
2560 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
2561 DMERR("superblock size of a logical block is no longer valid");
2562 return -EINVAL;
2566 * Skipping super_load due to CTR_FLAG_SYNC will cause
2567 * the array to undergo initialization again as
2568 * though it were new. This is the intended effect
2569 * of the "sync" directive.
2571 * With reshaping capability added, we must ensure
2572 * that the "sync" directive is disallowed during the reshape.
2574 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
2575 continue;
2577 r = super_load(rdev, freshest);
2579 switch (r) {
2580 case 1:
2581 freshest = rdev;
2582 break;
2583 case 0:
2584 break;
2585 default:
2586 /* This is a failure to read the superblock from the metadata device. */
2588 * We have to keep any raid0 data/metadata device pairs or
2589 * the MD raid0 personality will fail to start the array.
2591 if (rs_is_raid0(rs))
2592 continue;
2595 * We keep the dm_devs to be able to emit the device tuple
2596 * properly on the table line in raid_status() (rather than
2597 * mistakenly acting as if '- -' got passed into the constructor).
2599 * The rdev has to stay on the same_set list to allow for
2600 * the attempt to restore faulty devices on second resume.
2602 rdev->raid_disk = rdev->saved_raid_disk = -1;
2603 break;
2607 if (!freshest)
2608 return 0;
2611 * Validation of the freshest device provides the source of
2612 * validation for the remaining devices.
2614 rs->ti->error = "Unable to assemble array: Invalid superblocks";
2615 if (super_validate(rs, freshest))
2616 return -EINVAL;
2618 if (validate_raid_redundancy(rs)) {
2619 rs->ti->error = "Insufficient redundancy to activate array";
2620 return -EINVAL;
2623 rdev_for_each(rdev, mddev)
2624 if (!test_bit(Journal, &rdev->flags) &&
2625 rdev != freshest &&
2626 super_validate(rs, rdev))
2627 return -EINVAL;
2628 return 0;
2632 * Adjust data_offset and new_data_offset on all disk members of @rs
2633 * for out of place reshaping if requested by the constructor
2635 * We need free space at the beginning of each raid disk for forward
2636 * and at the end for backward reshapes which userspace has to provide
2637 * via remapping/reordering of space.
2639 static int rs_adjust_data_offsets(struct raid_set *rs)
2641 sector_t data_offset = 0, new_data_offset = 0;
2642 struct md_rdev *rdev;
2644 /* Constructor did not request data offset change */
2645 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
2646 if (!rs_is_reshapable(rs))
2647 goto out;
2649 return 0;
2652 /* HM FIXME: get InSync raid_dev? */
2653 rdev = &rs->dev[0].rdev;
2655 if (rs->delta_disks < 0) {
2657 * Removing disks (reshaping backwards):
2659 * - before reshape: data is at offset 0 and free space
2660 * is at end of each component LV
2662 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
2664 data_offset = 0;
2665 new_data_offset = rs->data_offset;
2667 } else if (rs->delta_disks > 0) {
2669 * Adding disks (reshaping forwards):
2671 * - before reshape: data is at offset rs->data_offset != 0 and
2672 * free space is at the beginning of each component LV
2674 * - after reshape: data is at offset 0 on each component LV
2676 data_offset = rs->data_offset;
2677 new_data_offset = 0;
2679 } else {
2681 * User space passes in 0 for data offset after having removed reshape space
2683 * - or - (data offset != 0)
2685 * Changing RAID layout or chunk size -> toggle offsets
2687 * - before reshape: data is at offset rs->data_offset == 0 and
2688 * free space is at end of each component LV
2689 * -or-
2690 * data is at offset rs->data_offset != 0 and
2691 * free space is at the beginning of each component LV
2693 * - after reshape: data is at offset 0 if it was at offset != 0
2694 * or at offset != 0 if it was at offset 0
2695 * on each component LV
2698 data_offset = rs->data_offset ? rdev->data_offset : 0;
2699 new_data_offset = data_offset ? 0 : rs->data_offset;
2700 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2704 * Make sure we have the minimum number of free sectors per device
2706 if (rs->data_offset &&
2707 to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
2708 rs->ti->error = data_offset ? "No space for forward reshape" :
2709 "No space for backward reshape";
2710 return -ENOSPC;
2712 out:
2714 * Raise recovery_cp in case data_offset != 0 to
2715 * avoid false recovery positives in the constructor.
2717 if (rs->md.recovery_cp < rs->md.dev_sectors)
2718 rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
2720 /* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
2721 rdev_for_each(rdev, &rs->md) {
2722 if (!test_bit(Journal, &rdev->flags)) {
2723 rdev->data_offset = data_offset;
2724 rdev->new_data_offset = new_data_offset;
2728 return 0;
2729 }
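/*
 * Worked example for the layout/chunksize toggle above (numbers
 * illustrative): with "data_offset 8192" passed to the ctr and data
 * currently living at rdev->data_offset == 8192, the final else branch
 * yields data_offset = 8192 and new_data_offset = 0, i.e. the reshape
 * writes the converted stripes into the 8192 free sectors at the start
 * of each component LV and the offsets swap again on the next toggle.
 */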
2731 /* Userspace reordered disks -> adjust raid_disk indexes in @rs */
2732 static void __reorder_raid_disk_indexes(struct raid_set *rs)
2734 int i = 0;
2735 struct md_rdev *rdev;
2737 rdev_for_each(rdev, &rs->md) {
2738 if (!test_bit(Journal, &rdev->flags)) {
2739 rdev->raid_disk = i++;
2740 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2746 * Setup @rs for takeover by a different raid level
2748 static int rs_setup_takeover(struct raid_set *rs)
2750 struct mddev *mddev = &rs->md;
2751 struct md_rdev *rdev;
2752 unsigned int d = mddev->raid_disks = rs->raid_disks;
2753 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2755 if (rt_is_raid10(rs->raid_type)) {
2756 if (rs_is_raid0(rs)) {
2757 /* Userspace reordered disks -> adjust raid_disk indexes */
2758 __reorder_raid_disk_indexes(rs);
2760 /* raid0 -> raid10_far layout */
2761 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2762 rs->raid10_copies);
2763 } else if (rs_is_raid1(rs))
2764 /* raid1 -> raid10_near layout */
2765 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2766 rs->raid_disks);
2767 else
2768 return -EINVAL;
2772 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2773 mddev->recovery_cp = MaxSector;
2775 while (d--) {
2776 rdev = &rs->dev[d].rdev;
2778 if (test_bit(d, (void *) rs->rebuild_disks)) {
2779 clear_bit(In_sync, &rdev->flags);
2780 clear_bit(Faulty, &rdev->flags);
2781 mddev->recovery_cp = rdev->recovery_offset = 0;
2782 /* Bitmap has to be created when we do an "up" takeover */
2783 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2786 rdev->new_data_offset = new_data_offset;
2789 return 0;
2792 /* Prepare @rs for reshape */
2793 static int rs_prepare_reshape(struct raid_set *rs)
2795 bool reshape;
2796 struct mddev *mddev = &rs->md;
2798 if (rs_is_raid10(rs)) {
2799 if (rs->raid_disks != mddev->raid_disks &&
2800 __is_raid10_near(mddev->layout) &&
2801 rs->raid10_copies &&
2802 rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
2804 * raid disks have to be a multiple of data copies to allow this conversion.
2806 * This is actually not a reshape; it is a
2807 * rebuild of any additional mirrors per group
2809 if (rs->raid_disks % rs->raid10_copies) {
2810 rs->ti->error = "Can't reshape raid10 mirror groups";
2811 return -EINVAL;
2814 /* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
2815 __reorder_raid_disk_indexes(rs);
2816 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2817 rs->raid10_copies);
2818 mddev->new_layout = mddev->layout;
2819 reshape = false;
2820 } else
2821 reshape = true;
2823 } else if (rs_is_raid456(rs))
2824 reshape = true;
2826 else if (rs_is_raid1(rs)) {
2827 if (rs->delta_disks) {
2828 /* Process raid1 via delta_disks */
2829 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2830 reshape = true;
2831 } else {
2832 /* Process raid1 without delta_disks */
2833 mddev->raid_disks = rs->raid_disks;
2834 reshape = false;
2836 } else {
2837 rs->ti->error = "Called with bogus raid type";
2838 return -EINVAL;
2841 if (reshape) {
2842 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2843 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2844 } else if (mddev->raid_disks < rs->raid_disks)
2845 /* Create new superblocks and bitmaps, if any new disks */
2846 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2848 return 0;
2851 /* Get reshape sectors from data_offsets or raid set */
2852 static sector_t _get_reshape_sectors(struct raid_set *rs)
2854 struct md_rdev *rdev;
2855 sector_t reshape_sectors = 0;
2857 rdev_for_each(rdev, &rs->md)
2858 if (!test_bit(Journal, &rdev->flags)) {
2859 reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
2860 rdev->data_offset - rdev->new_data_offset :
2861 rdev->new_data_offset - rdev->data_offset;
2862 break;
2865 return max(reshape_sectors, (sector_t) rs->data_offset);
2866 }
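/*
 * E.g. with the offsets sketched above (data_offset == 8192,
 * new_data_offset == 0 on the first data rdev) this returns 8192
 * sectors of out-of-place reshape headroom; if the offsets have not
 * been adjusted yet, it falls back to the ctr-requested rs->data_offset.
 */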
2870 * - change raid layout
2871 * - change chunk size
2872 * - add disks
2873 * - remove disks
2875 static int rs_setup_reshape(struct raid_set *rs)
2877 int r = 0;
2878 unsigned int cur_raid_devs, d;
2879 sector_t reshape_sectors = _get_reshape_sectors(rs);
2880 struct mddev *mddev = &rs->md;
2881 struct md_rdev *rdev;
2883 mddev->delta_disks = rs->delta_disks;
2884 cur_raid_devs = mddev->raid_disks;
2886 /* Ignore impossible layout change whilst adding/removing disks */
2887 if (mddev->delta_disks &&
2888 mddev->layout != mddev->new_layout) {
2889 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2890 mddev->new_layout = mddev->layout;
2894 * Adjust array size:
2896 * - in case of adding disk(s), array size has
2897 * to grow after the disk adding reshape,
2898 * which'll happen in the event handler;
2899 * reshape will happen forward, so space has to
2900 * be available at the beginning of each disk
2902 * - in case of removing disk(s), array size
2903 * has to shrink before starting the reshape,
2904 * which'll happen here;
2905 * reshape will happen backward, so space has to
2906 * be available at the end of each disk
2908 * - data_offset and new_data_offset are
2909 * adjusted for aforementioned out of place
2910 * reshaping based on userspace passing in
2911 * the "data_offset <sectors>" key/value
2912 * pair via the constructor
2915 /* Add disk(s) */
2916 if (rs->delta_disks > 0) {
2917 /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
2918 for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2919 rdev = &rs->dev[d].rdev;
2920 clear_bit(In_sync, &rdev->flags);
2923 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
2924 * by md, which'll store that erroneously in the superblock on reshape
2926 rdev->saved_raid_disk = -1;
2927 rdev->raid_disk = d;
2929 rdev->sectors = mddev->dev_sectors;
2930 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2933 mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
2935 /* Remove disk(s) */
2936 } else if (rs->delta_disks < 0) {
2937 r = rs_set_dev_and_array_sectors(rs, true);
2938 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
2940 /* Change layout and/or chunk size */
2941 } else {
2943 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
2945 * keeping number of disks and do layout change ->
2947 * toggle reshape_backwards depending on data_offset:
2949 * - free space upfront -> reshape forward
2951 * - free space at the end -> reshape backward
2954 * This utilizes free reshape space avoiding the need
2955 * for userspace to move (parts of) LV segments in
2956 * case of layout/chunksize change (for disk
2957 * adding/removing reshape space has to be at
2958 * the proper address (see above with delta_disks):
2960 * add disk(s) -> begin
2961 * remove disk(s) -> end
2963 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2967 * Adjust device size for forward reshape
2968 * because md_finish_reshape() reduces it.
2970 if (!mddev->reshape_backwards)
2971 rdev_for_each(rdev, &rs->md)
2972 if (!test_bit(Journal, &rdev->flags))
2973 rdev->sectors += reshape_sectors;
2975 return r;
2976 }
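/*
 * Direction recap (illustrative): delta_disks = +1 on a 3-disk raid5
 * reshapes forward into free space at the start of each component LV;
 * delta_disks = -1 shrinks the array first and reshapes backward into
 * free space at the end; a pure raid5_ls -> raid5_n layout change picks
 * its direction from where the free reshape space happens to sit.
 */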
2979 * Enable/disable discard support on RAID set depending on
2980 * RAID level and discard properties of underlying RAID members.
2982 static void configure_discard_support(struct raid_set *rs)
2984 int i;
2985 bool raid456;
2986 struct dm_target *ti = rs->ti;
2989 * XXX: RAID level 4,5,6 require zeroing for safety.
2991 raid456 = rs_is_raid456(rs);
2993 for (i = 0; i < rs->raid_disks; i++) {
2994 struct request_queue *q;
2996 if (!rs->dev[i].rdev.bdev)
2997 continue;
2999 q = bdev_get_queue(rs->dev[i].rdev.bdev);
3000 if (!q || !blk_queue_discard(q))
3001 return;
3003 if (raid456) {
3004 if (!devices_handle_discard_safely) {
3005 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
3006 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
3007 return;
3013 * RAID1 and RAID10 personalities require bio splitting,
3014 * RAID0/4/5/6 don't and process large discard bios properly.
3016 ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
3017 ti->num_discard_bios = 1;
3018 }
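/*
 * Enabling raid456 discards requires the opt-in named in the message
 * above, e.g. (sketch) via module parameter:
 *
 *	modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or, assuming the parameter is writable on the running kernel:
 *
 *	echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */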
3021 * Construct a RAID0/1/10/4/5/6 mapping:
3022 * Args:
3023 * <raid_type> <#raid_params> <raid_params>{0,} \
3024 * <#raid_devs> [<meta_dev1> <dev1>]{1,}
3026 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
3027 * details on possible <raid_params>.
3029 * Userspace is free to initialize the metadata devices (hence the superblocks)
3030 * to enforce recreation based on the passed-in table parameters.
3031 */
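/*
 * A concrete (hypothetical) table line matching the grammar above --
 * a 3-device raid5 with 64-sector chunks and one <meta_dev data_dev>
 * pair per member:
 *
 *	0 4194304 raid raid5_ls 1 64 3 \
 *		/dev/sdb1 /dev/sdb2 /dev/sdc1 /dev/sdc2 /dev/sdd1 /dev/sdd2
 *
 * "1" is <#raid_params> (just the mandatory chunk size here) and "3" is
 * <#raid_devs>.
 */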
3033 static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3035 int r;
3036 bool resize = false;
3037 struct raid_type *rt;
3038 unsigned int num_raid_params, num_raid_devs;
3039 sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
3040 struct raid_set *rs = NULL;
3041 const char *arg;
3042 struct rs_layout rs_layout;
3043 struct dm_arg_set as = { argc, argv }, as_nrd;
3044 struct dm_arg _args[] = {
3045 { 0, as.argc, "Cannot understand number of raid parameters" },
3046 { 1, 254, "Cannot understand number of raid devices parameters" }
3049 /* Must have <raid_type> */
3050 arg = dm_shift_arg(&as);
3051 if (!arg) {
3052 ti->error = "No arguments";
3053 return -EINVAL;
3056 rt = get_raid_type(arg);
3057 if (!rt) {
3058 ti->error = "Unrecognised raid_type";
3059 return -EINVAL;
3062 /* Must have <#raid_params> */
3063 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
3064 return -EINVAL;
3066 /* number of raid device tuples <meta_dev data_dev> */
3067 as_nrd = as;
3068 dm_consume_args(&as_nrd, num_raid_params);
3069 _args[1].max = (as_nrd.argc - 1) / 2;
3070 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
3071 return -EINVAL;
3073 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
3074 ti->error = "Invalid number of supplied raid devices";
3075 return -EINVAL;
3078 rs = raid_set_alloc(ti, rt, num_raid_devs);
3079 if (IS_ERR(rs))
3080 return PTR_ERR(rs);
3082 r = parse_raid_params(rs, &as, num_raid_params);
3083 if (r)
3084 goto bad;
3086 r = parse_dev_params(rs, &as);
3087 if (r)
3088 goto bad;
3090 rs->md.sync_super = super_sync;
3093 * Calculate ctr requested array and device sizes to allow
3094 * for superblock analysis needing device sizes defined.
3096 * Any existing superblock will overwrite the array and device sizes
3098 r = rs_set_dev_and_array_sectors(rs, false);
3099 if (r)
3100 goto bad;
3102 calculated_dev_sectors = rs->md.dev_sectors;
3105 * Backup any new raid set level, layout, ...
3106 * requested to be able to compare to superblock
3107 * members for conversion decisions.
3109 rs_config_backup(rs, &rs_layout);
3111 r = analyse_superblocks(ti, rs);
3112 if (r)
3113 goto bad;
3115 rdev_sectors = __rdev_sectors(rs);
3116 if (!rdev_sectors) {
3117 ti->error = "Invalid rdev size";
3118 r = -EINVAL;
3119 goto bad;
3123 reshape_sectors = _get_reshape_sectors(rs);
3124 if (calculated_dev_sectors != rdev_sectors)
3125 resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
3127 INIT_WORK(&rs->md.event_work, do_table_event);
3128 ti->private = rs;
3129 ti->num_flush_bios = 1;
3131 /* Restore any requested new layout for conversion decision */
3132 rs_config_restore(rs, &rs_layout);
3135 * Now that we have any superblock metadata available,
3136 * check for new, recovering, reshaping, to be taken over,
3137 * to be reshaped or an existing, unchanged raid set to
3138 * run in sequence.
3140 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
3141 /* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
3142 if (rs_is_raid6(rs) &&
3143 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
3144 ti->error = "'nosync' not allowed for new raid6 set";
3145 r = -EINVAL;
3146 goto bad;
3148 rs_setup_recovery(rs, 0);
3149 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3150 rs_set_new(rs);
3151 } else if (rs_is_recovering(rs)) {
3152 /* A recovering raid set may be resized */
3153 ; /* skip setup rs */
3154 } else if (rs_is_reshaping(rs)) {
3155 /* Have to reject size change request during reshape */
3156 if (resize) {
3157 ti->error = "Can't resize a reshaping raid set";
3158 r = -EPERM;
3159 goto bad;
3161 /* skip setup rs */
3162 } else if (rs_takeover_requested(rs)) {
3163 if (rs_is_reshaping(rs)) {
3164 ti->error = "Can't takeover a reshaping raid set";
3165 r = -EPERM;
3166 goto bad;
3169 /* We can't takeover a journaled raid4/5/6 */
3170 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3171 ti->error = "Can't takeover a journaled raid4/5/6 set";
3172 r = -EPERM;
3173 goto bad;
3177 * If a takeover is needed, userspace sets any additional
3178 * devices to rebuild and we can check for a valid request here.
3180 * If acceptable, set the level to the new requested
3181 * one, prohibit requesting recovery, allow the raid
3182 * set to run and store superblocks during resume.
3184 r = rs_check_takeover(rs);
3185 if (r)
3186 goto bad;
3188 r = rs_setup_takeover(rs);
3189 if (r)
3190 goto bad;
3192 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3193 /* Takeover ain't recovery, so disable recovery */
3194 rs_setup_recovery(rs, MaxSector);
3195 rs_set_new(rs);
3196 } else if (rs_reshape_requested(rs)) {
3198 * No need to check for 'ongoing' takeover here, because takeover
3199 * is an instant operation as opposed to an ongoing reshape.
3202 /* We can't reshape a journaled raid4/5/6 */
3203 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3204 ti->error = "Can't reshape a journaled raid4/5/6 set";
3205 r = -EPERM;
3206 goto bad;
3209 /* Out-of-place space has to be available to allow for a reshape unless raid1! */
3210 if (reshape_sectors || rs_is_raid1(rs)) {
3212 * We can only prepare for a reshape here, because the
3213 * raid set needs to run to provide the respective reshape
3214 * check functions via its MD personality instance.
3216 * So do the reshape check after md_run() succeeded.
3218 r = rs_prepare_reshape(rs);
3219 if (r)
3220 return r;
3222 /* Reshaping ain't recovery, so disable recovery */
3223 rs_setup_recovery(rs, MaxSector);
3225 rs_set_cur(rs);
3226 } else {
3227 /* May not set recovery when a device rebuild is requested */
3228 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3229 rs_setup_recovery(rs, MaxSector);
3230 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3231 } else
3232 rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
3233 0 : (resize ? calculated_dev_sectors : MaxSector));
3234 rs_set_cur(rs);
3237 /* If constructor requested it, change data and new_data offsets */
3238 r = rs_adjust_data_offsets(rs);
3239 if (r)
3240 goto bad;
3242 /* Start raid set read-only and assumed clean to change in raid_resume() */
3243 rs->md.ro = 1;
3244 rs->md.in_sync = 1;
3245 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3247 /* Has to be held on running the array */
3248 mddev_lock_nointr(&rs->md);
3249 r = md_run(&rs->md);
3250 rs->md.in_sync = 0; /* Assume already marked dirty */
3251 if (r) {
3252 ti->error = "Failed to run raid array";
3253 mddev_unlock(&rs->md);
3254 goto bad;
3257 r = md_start(&rs->md);
3259 if (r) {
3260 ti->error = "Failed to start raid array";
3261 mddev_unlock(&rs->md);
3262 goto bad_md_start;
3265 rs->callbacks.congested_fn = raid_is_congested;
3266 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
3268 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
3269 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3270 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3271 if (r) {
3272 ti->error = "Failed to set raid4/5/6 journal mode";
3273 mddev_unlock(&rs->md);
3274 goto bad_journal_mode_set;
3278 mddev_suspend(&rs->md);
3279 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3281 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
3282 if (rs_is_raid456(rs)) {
3283 r = rs_set_raid456_stripe_cache(rs);
3284 if (r)
3285 goto bad_stripe_cache;
3288 /* Now do an early reshape check */
3289 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3290 r = rs_check_reshape(rs);
3291 if (r)
3292 goto bad_check_reshape;
3294 /* Restore new, ctr requested layout to perform check */
3295 rs_config_restore(rs, &rs_layout);
3297 if (rs->md.pers->start_reshape) {
3298 r = rs->md.pers->check_reshape(&rs->md);
3299 if (r) {
3300 ti->error = "Reshape check failed";
3301 goto bad_check_reshape;
3306 /* Disable/enable discard support on raid set. */
3307 configure_discard_support(rs);
3309 mddev_unlock(&rs->md);
3310 return 0;
3312 bad_md_start:
3313 bad_journal_mode_set:
3314 bad_stripe_cache:
3315 bad_check_reshape:
3316 md_stop(&rs->md);
3317 bad:
3318 raid_set_free(rs);
3320 return r;
3323 static void raid_dtr(struct dm_target *ti)
3325 struct raid_set *rs = ti->private;
3327 list_del_init(&rs->callbacks.list);
3328 md_stop(&rs->md);
3329 raid_set_free(rs);
3332 static int raid_map(struct dm_target *ti, struct bio *bio)
3334 struct raid_set *rs = ti->private;
3335 struct mddev *mddev = &rs->md;
3338 * If we're reshaping to add disk(s), ti->len and
3339 * mddev->array_sectors will differ during the process
3340 * (ti->len > mddev->array_sectors), so we have to requeue
3341 * bios with addresses > mddev->array_sectors here or
3342 * there will occur accesses past EOD of the component
3343 * data images thus erroring the raid set.
3345 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3346 return DM_MAPIO_REQUEUE;
3348 md_handle_request(mddev, bio);
3350 return DM_MAPIO_SUBMITTED;
3353 /* Return string describing the current sync action of @mddev */
3354 static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3356 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3357 return "frozen";
3359 /* The MD sync thread can be done with io but still be running */
3360 if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3361 (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3362 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3363 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3364 return "reshape";
3366 if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3367 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3368 return "resync";
3369 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
3370 return "check";
3371 return "repair";
3374 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3375 return "recover";
3378 return "idle";
3382 * Return status string for @rdev
3384 * Status characters:
3386 * 'D' = Dead/Failed raid set component or raid4/5/6 journal device
3387 * 'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
3388 * 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
3389 * '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
3391 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
3393 if (!rdev->bdev)
3394 return "-";
3395 else if (test_bit(Faulty, &rdev->flags))
3396 return "D";
3397 else if (test_bit(Journal, &rdev->flags))
3398 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
3399 else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
3400 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
3401 !test_bit(In_sync, &rdev->flags)))
3402 return "a";
3403 else
3404 return "A";
3407 /* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resyncing */
3408 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3409 sector_t resync_max_sectors)
3411 sector_t r;
3412 struct mddev *mddev = &rs->md;
3414 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3415 clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3417 if (rs_is_raid0(rs)) {
3418 r = resync_max_sectors;
3419 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3421 } else {
3422 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
3423 !test_bit(MD_RECOVERY_INTR, &recovery) &&
3424 (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
3425 test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
3426 test_bit(MD_RECOVERY_RUNNING, &recovery)))
3427 r = mddev->curr_resync_completed;
3428 else
3429 r = mddev->recovery_cp;
3431 if (r >= resync_max_sectors &&
3432 (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
3433 (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
3434 !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
3435 !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
3437 * Sync complete.
3439 /* In case we have finished recovering, the array is in sync. */
3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3443 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
3445 * In case we are recovering, the array is not in sync
3446 * and health chars should show the recovering legs.
3450 } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
3451 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3453 * If "resync" is occurring, the raid set
3454 * is or may be out of sync hence the health
3455 * characters shall be 'a'.
3457 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3459 } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
3460 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3462 * If "reshape" is occurring, the raid set
3463 * is or may be out of sync hence the health
3464 * characters shall be 'a'.
3466 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3468 } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3470 * If "check" or "repair" is occurring, the raid set has
3471 * undergone an initial sync and the health characters
3472 * should not be 'a' anymore.
3474 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3476 } else {
3477 struct md_rdev *rdev;
3480 * We are idle and recovery is needed, prevent 'A' chars race
3481 * caused by components still set to in-sync by the constructor.
3483 if (test_bit(MD_RECOVERY_NEEDED, &recovery))
3484 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3487 * The raid set may be doing an initial sync, or it may
3488 * be rebuilding individual components. If all the
3489 * devices are In_sync, then it is the raid set that is
3490 * being initialized.
3492 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3493 rdev_for_each(rdev, mddev)
3494 if (!test_bit(Journal, &rdev->flags) &&
3495 !test_bit(In_sync, &rdev->flags)) {
3496 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3497 break;
3502 return min(r, resync_max_sectors);
3505 /* Helper to return @dev name or "-" if !@dev */
3506 static const char *__get_dev_name(struct dm_dev *dev)
3508 return dev ? dev->name : "-";
3511 static void raid_status(struct dm_target *ti, status_type_t type,
3512 unsigned int status_flags, char *result, unsigned int maxlen)
3514 struct raid_set *rs = ti->private;
3515 struct mddev *mddev = &rs->md;
3516 struct r5conf *conf = mddev->private;
3517 int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
3518 unsigned long recovery;
3519 unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
3520 unsigned int sz = 0;
3521 unsigned int rebuild_disks;
3522 unsigned int write_mostly_params = 0;
3523 sector_t progress, resync_max_sectors, resync_mismatches;
3524 const char *sync_action;
3525 struct raid_type *rt;
3527 switch (type) {
3528 case STATUSTYPE_INFO:
3529 /* *Should* always succeed */
3530 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3531 if (!rt)
3532 return;
3534 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3536 /* Access most recent mddev properties for status output */
3537 smp_rmb();
3538 recovery = rs->md.recovery;
3539 /* Get sensible max sectors even if raid set not yet started */
3540 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3541 mddev->resync_max_sectors : mddev->dev_sectors;
3542 progress = rs_get_progress(rs, recovery, resync_max_sectors);
3543 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3544 atomic64_read(&mddev->resync_mismatches) : 0;
3545 sync_action = decipher_sync_action(&rs->md, recovery);
3547 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3548 for (i = 0; i < rs->raid_disks; i++)
3549 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3552 * In-sync/Reshape ratio:
3553 * The in-sync ratio shows the progress of:
3554 * - Initializing the raid set
3555 * - Rebuilding a subset of devices of the raid set
3556 * The user can distinguish between the two by referring
3557 * to the status characters.
3559 * The reshape ratio shows the progress of
3560 * changing the raid layout or the number of
3561 * disks of a raid set
3563 DMEMIT(" %llu/%llu", (unsigned long long) progress,
3564 (unsigned long long) resync_max_sectors);
3567 * v1.5.0+:
3569 * Sync action:
3570 * See Documentation/device-mapper/dm-raid.txt for
3571 * information on each of these states.
3573 DMEMIT(" %s", sync_action);
3576 * v1.5.0+:
3578 * resync_mismatches/mismatch_cnt
3579 * This field shows the number of discrepancies found when
3580 * performing a "check" of the raid set.
3582 DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3585 * v1.9.0+:
3587 * data_offset (needed for out of place reshaping)
3588 * This field shows the data offset into the data
3589 * image LV where the first stripe's data starts.
3591 * We keep data_offset equal on all raid disks of the set,
3592 * so retrieving it from the first raid disk is sufficient.
3594 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3597 * v1.10.0+:
3599 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
3600 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
3601 break;
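/*
 * Sample STATUSTYPE_INFO line produced above (values illustrative):
 *
 *	raid5_ls 3 AAA 4194304/4194304 idle 0 0 -
 *
 * i.e. <raid_type> <#devices> <health_chars> <resync/total>
 * <sync_action> <mismatch_cnt> <data_offset> <journal_char>.
 */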
3603 case STATUSTYPE_TABLE:
3604 /* Report the table line string you would use to construct this raid set */
3606 /* Calculate raid parameter count */
3607 for (i = 0; i < rs->raid_disks; i++)
3608 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3609 write_mostly_params += 2;
3610 rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
3611 raid_param_cnt += rebuild_disks * 2 +
3612 write_mostly_params +
3613 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
3614 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
3615 (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
3616 (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
3618 /* Emit table line */
3619 /* This has to be in the documented order for userspace! */
3620 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3621 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
3622 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
3623 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
3624 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
3625 if (rebuild_disks)
3626 for (i = 0; i < rs->raid_disks; i++)
3627 if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
3628 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
3629 rs->dev[i].rdev.raid_disk);
3630 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
3631 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
3632 mddev->bitmap_info.daemon_sleep);
3633 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
3634 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
3635 mddev->sync_speed_min);
3636 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
3637 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
3638 mddev->sync_speed_max);
3639 if (write_mostly_params)
3640 for (i = 0; i < rs->raid_disks; i++)
3641 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3642 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
3643 rs->dev[i].rdev.raid_disk);
3644 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
3645 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
3646 mddev->bitmap_info.max_write_behind);
3647 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
3648 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
3649 max_nr_stripes);
3650 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
3651 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
3652 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3653 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
3654 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
3655 raid10_md_layout_to_copies(mddev->layout));
3656 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
3657 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
3658 raid10_md_layout_to_format(mddev->layout));
3659 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
3660 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
3661 max(rs->delta_disks, mddev->delta_disks));
3662 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
3663 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
3664 (unsigned long long) rs->data_offset);
3665 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
3666 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
3667 __get_dev_name(rs->journal_dev.dev));
3668 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
3669 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
3670 md_journal_mode_to_dm_raid(rs->journal_dev.mode));
3671 DMEMIT(" %d", rs->raid_disks);
3672 for (i = 0; i < rs->raid_disks; i++)
3673 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
3674 __get_dev_name(rs->dev[i].data_dev));
3678 static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
3679 char *result, unsigned maxlen)
3681 struct raid_set *rs = ti->private;
3682 struct mddev *mddev = &rs->md;
3684 if (!mddev->pers || !mddev->pers->sync_request)
3685 return -EINVAL;
3687 if (!strcasecmp(argv[0], "frozen"))
3688 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3689 else
3690 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3692 if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
3693 if (mddev->sync_thread) {
3694 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3695 md_reap_sync_thread(mddev);
3697 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3698 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3699 return -EBUSY;
3700 else if (!strcasecmp(argv[0], "resync"))
3701 ; /* MD_RECOVERY_NEEDED set below */
3702 else if (!strcasecmp(argv[0], "recover"))
3703 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3704 else {
3705 if (!strcasecmp(argv[0], "check")) {
3706 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3707 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3708 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3709 } else if (!strcasecmp(argv[0], "repair")) {
3710 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3711 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3712 } else
3713 return -EINVAL;
3715 if (mddev->ro == 2) {
3716 /* A write to sync_action is enough to justify
3717 * canceling read-auto mode
3719 mddev->ro = 0;
3720 if (!mddev->suspended && mddev->sync_thread)
3721 md_wakeup_thread(mddev->sync_thread);
3723 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3724 if (!mddev->suspended && mddev->thread)
3725 md_wakeup_thread(mddev->thread);
3727 return 0;
3728 }
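/*
 * These messages map onto the MD sync actions, e.g. (hypothetical
 * device name):
 *
 *	dmsetup message r 0 check	# scrub, only count mismatches
 *	dmsetup message r 0 repair	# scrub and rewrite mismatches
 *	dmsetup message r 0 frozen	# freeze recovery
 *	dmsetup message r 0 idle	# stop a running sync thread
 */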
3730 static int raid_iterate_devices(struct dm_target *ti,
3731 iterate_devices_callout_fn fn, void *data)
3733 struct raid_set *rs = ti->private;
3734 unsigned int i;
3735 int r = 0;
3737 for (i = 0; !r && i < rs->md.raid_disks; i++)
3738 if (rs->dev[i].data_dev)
3739 r = fn(ti,
3740 rs->dev[i].data_dev,
3741 0, /* No offset on data devs */
3742 rs->md.dev_sectors,
3743 data);
3745 return r;
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}

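/*
 * Called by the device-mapper core once I/O to the target has been
 * quiesced: stop writes and suspend the md array exactly once, guarded
 * by RT_FLAG_RS_SUSPENDED.
 */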
static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Writes have to be stopped before suspending to avoid deadlocks. */
		if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
			md_stop_writes(&rs->md);

		mddev_lock_nointr(&rs->md);
		mddev_suspend(&rs->md);
		mddev_unlock(&rs->md);
	}
}

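/*
 * Try to bring devices marked Faulty back into the raid set: any failed
 * device whose superblock is still readable is hot-removed and re-added
 * so that md can recover it in place.
 */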
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
	unsigned long flags;
	bool cleared = false;
	struct dm_raid_superblock *sb;
	struct mddev *mddev = &rs->md;
	struct md_rdev *r;

	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
	if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
		return;

	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));

	for (i = 0; i < mddev->raid_disks; i++) {
		r = &rs->dev[i].rdev;
		/* HM FIXME: enhance journal device recovery processing */
		if (test_bit(Journal, &r->flags))
			continue;

		if (test_bit(Faulty, &r->flags) &&
		    r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk'). If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			flags = r->flags;
			clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
			if (r->raid_disk >= 0) {
				if (mddev->pers->hot_remove_disk(mddev, r)) {
					/* Failed to revive this device, try next */
					r->flags = flags;
					continue;
				}
			} else
				r->raid_disk = r->saved_raid_disk = i;

			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);

			if (mddev->pers->hot_add_disk(mddev, r)) {
				/* Failed to revive this device, try next */
				r->raid_disk = r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				clear_bit(In_sync, &r->flags);
				r->recovery_offset = 0;
				set_bit(i, (void *) cleared_failed_devices);
				cleared = true;
			}
		}
	}

	/* If any failed devices could be cleared, update all sbs' failed_devices bits */
	if (cleared) {
		uint64_t failed_devices[DISKS_ARRAY_ELEMS];

		rdev_for_each(r, &rs->md) {
			if (test_bit(Journal, &r->flags))
				continue;

			sb = page_address(r->sb_page);
			sb_retrieve_failed_devices(sb, failed_devices);

			for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
				failed_devices[i] &= ~cleared_failed_devices[i];

			sb_update_failed_devices(sb, failed_devices);
		}
	}
}

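/*
 * Load the raid set's write-intent bitmap exactly once; repeat calls
 * are no-ops thanks to the RT_FLAG_RS_BITMAP_LOADED guard.
 */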
static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		r = bitmap_load(&rs->md);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}

/* Enforce updating all superblocks */
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

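	/*
	 * Temporarily clear the read-only flag, since md skips superblock
	 * updates on read-only arrays; the flag is restored below.
	 */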
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}

/*
 * Reshape changes the raid algorithm of @rs to a new one within its
 * personality (e.g. raid6_zr -> raid6_nc), changes stripe size,
 * adds/removes disks from a raid set thus growing/shrinking it or
 * resizes the set.
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
		mddev_resume(mddev);

	/*
	 * Check any reshape constraints enforced by the personality
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide a start reshape method, in which
	 * case check_reshape above has already covered everything.
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/* Suspend because a resume will happen in raid_resume() */
	set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
	mddev_suspend(mddev);

	/*
	 * Now that the reshape is set up, update the superblocks to
	 * reflect the fact so that a table reload will access proper
	 * superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}

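/*
 * Prepare the raid set for a resume: update superblocks on disk if
 * required, load and (re)size the dirty region bitmap, and check for
 * and initiate any requested resynchronization or reshape. Runs only
 * once per table load, guarded by RT_FLAG_RS_PRERESUMED.
 */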
static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the set -> it's already started. */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
		struct raid_set *rs_active = rs_find_active(rs);

		if (rs_active) {
			/*
			 * In case no rebuilds have been requested
			 * and an active table slot exists, copy
			 * current resynchronization completed and
			 * reshape position pointers across from
			 * suspended raid set in the active slot.
			 *
			 * This resumes the new mapping at current
			 * offsets to continue recover/reshape without
			 * necessarily redoing a raid set partially or
			 * causing data corruption in case of a reshape.
			 */
			if (rs_active->md.curr_resync_completed != MaxSector)
				mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
			if (rs_active->md.reshape_position != MaxSector)
				mddev->reshape_position = rs_active->md.reshape_position;
		}
	}

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
	    mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
		r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
				  to_bytes(rs->requested_bitmap_chunk_sectors), 0);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
	}

	/* Check for any reshape request unless new raid set */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		rs_set_rdev_sectors(rs);
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}

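/*
 * Resume the raid set. On a secondary resume while the device is
 * active, merely try to revive failed devices; on the first resume
 * after a suspend, unfreeze recovery and resume the md array.
 */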
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Only reduce raid set size before running a disk removing reshape. */
		if (mddev->delta_disks < 0)
			rs_set_capacity(rs);

		mddev_lock_nointr(mddev);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		mddev->ro = 0;
		mddev->in_sync = 0;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

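/* The hooks the "raid" target registers with the device-mapper core. */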
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 13, 2},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

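/*
 * Passing discards down is only safe if the underlying devices reliably
 * return zeroes for discarded regions. The 0644 permissions mean the
 * parameter can also be changed at runtime via
 * /sys/module/dm_raid/parameters/devices_handle_discard_safely.
 */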
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");