Linux 4.7.4
drivers/md/dm-raid.c
/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* raid4/5/6 limit */
static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10 /* rdev flag */
struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};
/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC              0x1
#define CTR_FLAG_NOSYNC            0x2
#define CTR_FLAG_REBUILD           0x4
#define CTR_FLAG_DAEMON_SLEEP      0x8
#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
#define CTR_FLAG_MAX_WRITE_BEHIND  0x40
#define CTR_FLAG_STRIPE_CACHE      0x80
#define CTR_FLAG_REGION_SIZE       0x100
#define CTR_FLAG_RAID10_COPIES     0x200
#define CTR_FLAG_RAID10_FORMAT     0x400
struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t ctr_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};
/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",    "RAID0 (striping)",              0, 2, 0,  0 /* NONE */},
	{"raid1",    "RAID1 (mirroring)",             0, 2, 1,  0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",      0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)", 1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",       1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",      1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",        1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",       1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",          2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",             2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",            2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 and 17 stand for "offset" and "use_far_sets"
	 * Refer to MD's raid10.c for details
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}
static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;
	return (layout >> 8) & 0xFF;
}
static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcasecmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcasecmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcasecmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}
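/*
 * Worked examples of the encoding above (derived by hand from these
 * helpers; the hex values are illustrative, not an external ABI
 * reference):
 *
 *   raid10_format_to_md_layout("near", 2)   -> 0x102
 *       (f = 1, n = 2; near copies live in the low byte)
 *   raid10_format_to_md_layout("far", 2)    -> 0x20201
 *       (use_far_sets bit 17 set, f = 2 in the second byte, n = 1)
 *   raid10_format_to_md_layout("offset", 2) -> 0x30201
 *       (bits 16 and 17 set, f = 2, n = 1)
 *
 * raid10_md_layout_to_copies() then recovers 2 from each of these
 * values, and raid10_md_layout_to_format() recovers the format name.
 */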
static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}
static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
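/*
 * Example pairs (hypothetical device numbers, for illustration only):
 *
 *   "8:17 8:18"  -> dev[i]: metadata on 8:17, data on 8:18
 *   "- 8:18"     -> dev[i]: no metadata device, data on 8:18
 *   "- -"        -> dev[i]: slot intentionally left empty
 *
 * "8:17 -" would be rejected below, since a metadata device without a
 * data device is not allowed.
 */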
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}

		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}
	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
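/*
 * A quick sanity example of the math above (values illustrative): for a
 * 1 TiB target, ti->len = 2^31 sectors, so min_region_size =
 * 2^31 / 2^21 = 1024 sectors (512 KiB).  That is below the 8192-sector
 * threshold, so the 4MiB (2^13 sector) default is chosen, giving
 * 2^31 / 2^13 = 2^18 regions - comfortably under MD's 2^21 limit.
 */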
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
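/*
 * To make the "near" check above concrete (hand-derived from the device
 * map in the comment): with 5 devices and 2 copies laid out as
 *
 *        dev1 dev2 dev3 dev4 dev5
 *         A    A    B    B    C
 *         C    D    D    E    E
 *
 * losing dev1 and dev3 is survivable (every chunk keeps one copy), but
 * losing dev1 and dev2 loses both copies of A, and losing dev4 and dev5
 * loses both copies of E - exactly the adjacent failures the loop above
 * rejects by counting rebuilds per copy group.
 */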
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
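/*
 * Illustrative table lines in the spirit of
 * Documentation/device-mapper/dm-raid.txt (lengths and device numbers
 * are hypothetical):
 *
 * RAID4 with 4 data + 1 parity device, 1MiB (2048-sector) chunks,
 * no metadata devices:
 *
 *   0 1960893648 raid \
 *       raid4 1 2048 \
 *       5 - 8:17 - 8:18 - 8:33 - 8:34 - 8:49
 *
 * The same set with metadata devices, forced initialization and a
 * recovery-rate floor:
 *
 *   0 1960893648 raid \
 *       raid4 4 2048 sync min_recovery_rate 20 \
 *       5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
 *
 * In the second line <#raid_params> is 4: the chunk size, "sync", and
 * the "min_recovery_rate 20" key/value pair.
 */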
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if ((kstrtoul(argv[0], 10, &value) < 0)) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	argv++;
	num_raid_params--;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 * 1) User specifies 'rebuild'.
	 *    - Device is reset when param is read.
	 * 2) A new device is supplied.
	 *    - No matching superblock found, resets device.
	 * 3) Device failure was transient and returns on reload.
	 *    - Failure noticed, resets device for bitmap replay.
	 * 4) Device hadn't completed recovery after previous failure.
	 *    - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->ctr_flags |= CTR_FLAG_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->ctr_flags |= CTR_FLAG_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];

		/* Parameters that take a string value are checked here. */
		if (!strcasecmp(key, "raid10_format")) {
			if (rs->raid_type->level != 10) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			if (strcmp("near", argv[i]) &&
			    strcmp("far", argv[i]) &&
			    strcmp("offset", argv[i])) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return -EINVAL;
			}
			raid10_format = argv[i];
			rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
			continue;
		}

		if (kstrtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->ctr_flags |= CTR_FLAG_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if ((rs->raid_type->level != 5) &&
			    (rs->raid_type->level != 6)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
			region_size = value;
		} else if (!strcasecmp(key, "raid10_copies") &&
			   (rs->raid_type->level == 10)) {
			if ((value < 2) || (value > 0xFF)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}
			rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rs->raid_type->level == 10) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
			rs->ti->error = "Too many copies for given RAID10 format.";
			return -EINVAL;
		}

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}

	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}
static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
} __packed;
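/*
 * Note on sizing: the fixed fields above add up to 60 bytes
 * (7 * __le32 + 4 * __le64), so even a 512-byte logical block is ample;
 * super_load() below rejects metadata devices whose logical block size
 * is smaller than this struct or larger than a page.
 */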
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}
/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
		DMERR("superblock size of a logical block is no longer valid");
		return -EINVAL;
	}

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
		return -EINVAL;
	}
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR("  Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR("  New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
		return -EINVAL;
	}
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
		return -EINVAL;
	}

	if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	if (le32_to_cpu(sb->features)) {
		rs->ti->error = "Unable to assemble array: No feature flags supported yet";
		return -EINVAL;
	}

	/* Enable bitmap creation for RAID levels != 0 */
	mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
	rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));

		if (rs->ctr_flags & CTR_FLAG_SYNC)
			continue;

		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed.  For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(rs, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(rs, rdev))
			return -EINVAL;

	return 0;
}
/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
{
	int i;
	bool raid456;

	/* Assume discards not supported until after checks below. */
	ti->discards_supported = false;

	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!q->limits.discard_zeroes_data)
				return;
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/* All RAID members properly support discards */
	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
	ti->num_discard_bios = 1;
}
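/*
 * For reference, the override named in the message above is an ordinary
 * module parameter (declared with 0644 permissions at the bottom of this
 * file), so it can be set at load time or flipped at runtime before the
 * table is loaded, e.g.:
 *
 *   modprobe dm-raid devices_handle_discard_safely=Y
 *   echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */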
/*
 * Construct a RAID4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>		\
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
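/*
 * For example (hypothetical device numbers), the argv
 *
 *   "raid1" "2" "0" "nosync" "2" "-" "8:17" "-" "8:33"
 *
 * parses as: raid_type raid1; two raid params ("0" for the chunk size,
 * which is ignored for RAID1, plus "nosync"); then two devices, each
 * given as a "- <data_dev>" pair with no metadata device.
 */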
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params >= argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs > MAX_RAID_DEVICES)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
	if (argc != (num_raid_devs * 2)) {
		ti->error = "Number of supplied RAID devices does not match the count given";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	argv += num_raid_params + 1;

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/*
	 * Disable/enable discard support on RAID set.
	 */
	configure_discard_support(ti, rs);

	/* Has to be held on running the array */
	mddev_lock_nointr(&rs->md);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mddev_unlock(&rs->md);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		ti->error = "Array size does not match requested target length";
		ret = -EINVAL;
		goto size_mismatch;
	}

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

size_mismatch:
	md_stop(&rs->md);
bad:
	context_free(rs);

	return ret;
}
static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}
static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}
static const char *decipher_sync_action(struct mddev *mddev)
{
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
}
static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (rs->raid_type->level) {
			if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
				sync = rs->md.curr_resync_completed;
			else
				sync = rs->md.recovery_cp;

			if (sync >= rs->md.resync_max_sectors) {
				/*
				 * Sync complete.
				 */
				array_in_sync = 1;
				sync = rs->md.resync_max_sectors;
			} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
				/*
				 * If "check" or "repair" is occurring, the array has
				 * undergone an initial sync and the health characters
				 * should not be 'a' anymore.
				 */
				array_in_sync = 1;
			} else {
				/*
				 * The array may be doing an initial sync, or it may
				 * be rebuilding individual components.  If all the
				 * devices are In_sync, then it is the array that is
				 * being initialized.
				 */
				for (i = 0; i < rs->md.raid_disks; i++)
					if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
						array_in_sync = 1;
			}
		} else {
			/* RAID0 */
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
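		/*
		 * A full STATUSTYPE_INFO line therefore looks like, e.g.
		 * (illustrative numbers):
		 *
		 *   raid4 5 AAAAA 490221568/490221568 idle 0
		 *
		 * i.e. <raid_type> <#devices> <health_chars> <sync_ratio>
		 * <sync_action> <mismatch_cnt>, emitted piecewise below.
		 */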
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * In-sync ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the array
		 *   - Rebuilding a subset of devices of the array
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);
		/*
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", decipher_sync_action(&rs->md));
		/*
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the array.
		 */
		DMEMIT(" %llu",
		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
		       (unsigned long long)
		       atomic64_read(&rs->md.resync_mismatches));
		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
		if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->ctr_flags & CTR_FLAG_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}
}
static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!strcasecmp(argv[0], "reshape")) {
		DMERR("Reshape not supported.");
		return -EINVAL;
	}

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (!strcasecmp(argv[0], "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
}
static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->md.dev_sectors,
				 data);

	return ret;
}
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       "  Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;
			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				cleared_failed_devices |= 1ULL << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (rs->raid_type->level) {
		set_bit(MD_CHANGE_DEVS, &rs->md.flags);

		if (!rs->bitmap_loaded) {
			bitmap_load(&rs->md);
			rs->bitmap_loaded = 1;
		} else {
			/*
			 * A secondary resume while the device is active.
			 * Take this opportunity to check whether any failed
			 * devices are reachable again.
			 */
			attempt_restore_of_faulty_devices(rs);
		}

		clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	}

	mddev_resume(&rs->md);
}
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};
static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");