// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);

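/*
 * Layout used when assembling a multi-zone array whose superblock does not
 * record one: 1 selects RAID0_ORIG_LAYOUT, 2 selects
 * RAID0_ALT_MULTIZONE_LAYOUT, and 0 (the default) makes assembly of such
 * arrays fail until the admin picks one (see create_strip_zones()).
 */
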
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

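/*
 * UNSUPPORTED_MDDEV_FLAGS collects features that have no meaning on a raid0
 * array (journal, PPL, failfast); the takeover paths below clear them from a
 * converted array's flags.
 */
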
/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line+len, 200-len, "%s%pg", k?"/":"",
					 conf->devlist[j * raid_disks + k]->bdev);
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			 (unsigned long long)zone_start>>1,
			 (unsigned long long)conf->strip_zone[j].dev_start>>1,
			 (unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

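/*
 * A raid0 array built from devices of different sizes is described as a
 * series of strip zones: zone 0 stripes over all devices up to the size of
 * the smallest one, the next zone stripes only over the devices that extend
 * beyond that, and so on. For example, two 100G devices plus one 200G device
 * give zone 0 striped over three devices and zone 1 covering the last 100G
 * of the large device alone. create_strip_zones() below walks the member
 * devices and builds this table in conf->strip_zone[] and conf->devlist[].
 */
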
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %pg\n",
			 mdname(mddev),
			 rdev1->bdev);
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %pg(%llu)"
				 " with %pg(%llu)\n",
				 mdname(mddev),
				 rdev1->bdev,
				 (unsigned long long)rdev1->sectors,
				 rdev2->bdev,
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %pg ... nope\n",
					 mdname(mddev),
					 rdev->bdev);
				continue;
			}
			pr_debug("md/raid0:%s: checking %pg ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 rdev->bdev, c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	if (conf->layout == RAID0_ORIG_LAYOUT) {
		for (i = 1; i < conf->nr_strip_zones; i++) {
			sector_t first_sector = conf->strip_zone[i-1].zone_end;

			sector_div(first_sector, mddev->chunk_sectors);
			zone = conf->strip_zone + i;
			/* disk_shift is first disk index used in the zone */
			zone->disk_shift = sector_div(first_sector,
						      zone->nb_dev);
		}
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

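/*
 * Example of the chunk mapping done by map_sector() below (zone 0, three
 * devices, 64-sector chunks): array sector 200 lies in chunk 3 (sectors
 * 192-255), which belongs to device 3 % 3 = 0 and is the second chunk stored
 * on that device (3 / 3 = 1), so it maps to device 0 at sector
 * 1 * 64 + (200 - 192) = 72, before the zone and data offsets are added.
 */
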
/*
 * remaps the bio to the target device. we separate two flows.
 * power 2 flow and a general flow for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

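/*
 * The limits advertised here let upper layers size their I/O for the stripe
 * geometry: io_min is one chunk in bytes and io_opt is a full stripe
 * (chunk * number of disks). max_hw_sectors is capped at one chunk, so any
 * bio that reaches raid0_make_request() spans at most two chunks and the
 * remainder of a split never crosses another chunk boundary.
 */
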
static int raid0_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_hw_sectors = mddev->chunk_sectors;
	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
	lim.io_min = mddev->chunk_sectors << 9;
	lim.io_opt = lim.io_min * mddev->raid_disks;
	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err) {
		queue_limits_cancel_update(mddev->gendisk->queue);
		return err;
	}
	return queue_limits_set(mddev->gendisk->queue, &lim);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!mddev_is_dm(mddev)) {
		ret = raid0_set_limits(mddev);
		if (ret)
			return ret;
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	return md_integrity_register(mddev);
}

/*
 * Convert disk_index to the disk order in which it is read/written.
 *  For example, if we have 4 disks, they are numbered 0,1,2,3. If we
 *  write the disks starting at disk 3, then the read/write order would
 *  be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift()
 *  to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
 *  to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
 *  that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return ((disk_index + num_disks - disk_shift) % num_disks);
}

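/*
 * Discards are handled zone by zone: the request is first clipped to the
 * zone that contains its start (the remainder is re-submitted), then one
 * discard per member device is issued covering that device's share of the
 * range, computed from the first and last stripe touched. With the original
 * layout the per-zone disk rotation (zone->disk_shift) has to be undone via
 * map_disk_shift() before the start/end devices can be compared.
 */
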
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;
	sector_t orig_start, orig_end;

	orig_start = start;
	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);

		if (IS_ERR(split)) {
			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
			bio_endio(bio);
			return;
		}
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	orig_end = end;
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end is the offset in zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* In the first zone the original and alternate layouts are the same */
	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
		sector_div(orig_start, mddev->chunk_sectors);
		start_disk_index = sector_div(orig_start, zone->nb_dev);
		start_disk_index = map_disk_shift(start_disk_index,
						  zone->nb_dev,
						  zone->disk_shift);
		sector_div(orig_end, mddev->chunk_sectors);
		end_disk_index = sector_div(orig_end, zone->nb_dev);
		end_disk_index = map_disk_shift(end_disk_index,
						zone->nb_dev, zone->disk_shift);
	} else {
		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
			mddev->chunk_sectors;
		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
			mddev->chunk_sectors;
	}
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;
		int compare_disk;

		compare_disk = map_disk_shift(disk, zone->nb_dev,
					      zone->disk_shift);

		if (compare_disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (compare_disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (compare_disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (compare_disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector = bio->bi_iter.bi_sector;
	sector_t sector = bio_sector;

	md_account_bio(mddev, &bio);

	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return;
	}

	if (unlikely(is_rdev_broken(tmp_dev))) {
		bio_io_error(bio);
		md_error(mddev, tmp_dev);
		return;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;
	mddev_trace_remap(mddev, bio, bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	sector = bio->bi_iter.bi_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);

		if (IS_ERR(split)) {
			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
			bio_endio(bio);
			return true;
		}
		bio_chain(split, bio);
		raid0_map_submit_bio(mddev, bio);
		bio = split;
	}

	raid0_map_submit_bio(mddev, bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

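/*
 * raid0 has no redundancy, so a member failure cannot be repaired; the error
 * handler below only marks the whole array MD_BROKEN and logs the failing
 * device once.
 */
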
static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
		char *md_name = mdname(mddev);

		pr_crit("md/raid0%s: Disk failure on %pg detected, failing array.\n",
			md_name, rdev->bdev);
	}
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

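/*
 * raid1 has no chunk size, so the takeover below derives one: start at 64K
 * and halve it until the array size is a multiple of it. For example, an
 * array of 1000 sectors ends up with an 8-sector (4K) chunk, which is
 * accepted as long as it is not smaller than PAGE_SIZE.
 */
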
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N -1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.error_handler	= raid0_error,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");