/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/freezer.h>

#include <linux/init.h>

#include <linux/file.h>

#include <linux/kmod.h>

#include <asm/unaligned.h>
#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
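/*
 * Example (illustrative): MD_BUG() takes no useful arguments and is used at
 * "should never happen" points, in the style of
 *
 *	if (!rdev->mddev) {
 *		MD_BUG();
 *		return;
 *	}
 *
 * logging the file/line and dumping the complete raid state via
 * md_print_devices().
 */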
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock); 				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
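/*
 * Typical use (for illustration; md_print_devices() below iterates this way):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		... the body runs with a reference held on mddev ...
 *	}
 *
 * A caller that breaks out of the loop early still owns the current mddev
 * and must call mddev_put() on it.
 */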
214 static int md_fail_request (struct request_queue
*q
, struct bio
*bio
)
220 static inline mddev_t
*mddev_get(mddev_t
*mddev
)
222 atomic_inc(&mddev
->active
);
226 static void mddev_put(mddev_t
*mddev
)
228 if (!atomic_dec_and_lock(&mddev
->active
, &all_mddevs_lock
))
230 if (!mddev
->raid_disks
&& list_empty(&mddev
->disks
)) {
231 list_del(&mddev
->all_mddevs
);
232 spin_unlock(&all_mddevs_lock
);
233 blk_cleanup_queue(mddev
->queue
);
234 kobject_put(&mddev
->kobj
);
236 spin_unlock(&all_mddevs_lock
);
239 static mddev_t
* mddev_find(dev_t unit
)
241 mddev_t
*mddev
, *new = NULL
;
244 spin_lock(&all_mddevs_lock
);
245 list_for_each_entry(mddev
, &all_mddevs
, all_mddevs
)
246 if (mddev
->unit
== unit
) {
248 spin_unlock(&all_mddevs_lock
);
254 list_add(&new->all_mddevs
, &all_mddevs
);
255 spin_unlock(&all_mddevs_lock
);
258 spin_unlock(&all_mddevs_lock
);
260 new = kzalloc(sizeof(*new), GFP_KERNEL
);
265 if (MAJOR(unit
) == MD_MAJOR
)
266 new->md_minor
= MINOR(unit
);
268 new->md_minor
= MINOR(unit
) >> MdpMinorShift
;
270 mutex_init(&new->reconfig_mutex
);
271 INIT_LIST_HEAD(&new->disks
);
272 INIT_LIST_HEAD(&new->all_mddevs
);
273 init_timer(&new->safemode_timer
);
274 atomic_set(&new->active
, 1);
275 spin_lock_init(&new->write_lock
);
276 init_waitqueue_head(&new->sb_wait
);
277 new->reshape_position
= MaxSector
;
278 new->resync_max
= MaxSector
;
280 new->queue
= blk_alloc_queue(GFP_KERNEL
);
285 set_bit(QUEUE_FLAG_CLUSTER
, &new->queue
->queue_flags
);
287 blk_queue_make_request(new->queue
, md_fail_request
);
292 static inline int mddev_lock(mddev_t
* mddev
)
294 return mutex_lock_interruptible(&mddev
->reconfig_mutex
);
297 static inline int mddev_trylock(mddev_t
* mddev
)
299 return mutex_trylock(&mddev
->reconfig_mutex
);
302 static inline void mddev_unlock(mddev_t
* mddev
)
304 mutex_unlock(&mddev
->reconfig_mutex
);
306 md_wakeup_thread(mddev
->thread
);
309 static mdk_rdev_t
* find_rdev_nr(mddev_t
*mddev
, int nr
)
312 struct list_head
*tmp
;
314 rdev_for_each(rdev
, tmp
, mddev
) {
315 if (rdev
->desc_nr
== nr
)
321 static mdk_rdev_t
* find_rdev(mddev_t
* mddev
, dev_t dev
)
323 struct list_head
*tmp
;
326 rdev_for_each(rdev
, tmp
, mddev
) {
327 if (rdev
->bdev
->bd_dev
== dev
)
333 static struct mdk_personality
*find_pers(int level
, char *clevel
)
335 struct mdk_personality
*pers
;
336 list_for_each_entry(pers
, &pers_list
, list
) {
337 if (level
!= LEVEL_NONE
&& pers
->level
== level
)
339 if (strcmp(pers
->name
, clevel
)==0)
345 static inline sector_t
calc_dev_sboffset(struct block_device
*bdev
)
347 sector_t size
= bdev
->bd_inode
->i_size
>> BLOCK_SIZE_BITS
;
348 return MD_NEW_SIZE_BLOCKS(size
);
351 static sector_t
calc_dev_size(mdk_rdev_t
*rdev
, unsigned chunk_size
)
355 size
= rdev
->sb_offset
;
358 size
&= ~((sector_t
)chunk_size
/1024 - 1);
362 static int alloc_disk_sb(mdk_rdev_t
* rdev
)
367 rdev
->sb_page
= alloc_page(GFP_KERNEL
);
368 if (!rdev
->sb_page
) {
369 printk(KERN_ALERT
"md: out of memory.\n");
376 static void free_disk_sb(mdk_rdev_t
* rdev
)
379 put_page(rdev
->sb_page
);
381 rdev
->sb_page
= NULL
;
388 static void super_written(struct bio
*bio
, int error
)
390 mdk_rdev_t
*rdev
= bio
->bi_private
;
391 mddev_t
*mddev
= rdev
->mddev
;
393 if (error
|| !test_bit(BIO_UPTODATE
, &bio
->bi_flags
)) {
394 printk("md: super_written gets error=%d, uptodate=%d\n",
395 error
, test_bit(BIO_UPTODATE
, &bio
->bi_flags
));
396 WARN_ON(test_bit(BIO_UPTODATE
, &bio
->bi_flags
));
397 md_error(mddev
, rdev
);
400 if (atomic_dec_and_test(&mddev
->pending_writes
))
401 wake_up(&mddev
->sb_wait
);
405 static void super_written_barrier(struct bio
*bio
, int error
)
407 struct bio
*bio2
= bio
->bi_private
;
408 mdk_rdev_t
*rdev
= bio2
->bi_private
;
409 mddev_t
*mddev
= rdev
->mddev
;
411 if (!test_bit(BIO_UPTODATE
, &bio
->bi_flags
) &&
412 error
== -EOPNOTSUPP
) {
414 /* barriers don't appear to be supported :-( */
415 set_bit(BarriersNotsupp
, &rdev
->flags
);
416 mddev
->barriers_work
= 0;
417 spin_lock_irqsave(&mddev
->write_lock
, flags
);
418 bio2
->bi_next
= mddev
->biolist
;
419 mddev
->biolist
= bio2
;
420 spin_unlock_irqrestore(&mddev
->write_lock
, flags
);
421 wake_up(&mddev
->sb_wait
);
425 bio
->bi_private
= rdev
;
426 super_written(bio
, error
);
430 void md_super_write(mddev_t
*mddev
, mdk_rdev_t
*rdev
,
431 sector_t sector
, int size
, struct page
*page
)
433 /* write first size bytes of page to sector of rdev
434 * Increment mddev->pending_writes before returning
435 * and decrement it on completion, waking up sb_wait
436 * if zero is reached.
437 * If an error occurred, call md_error
439 * As we might need to resubmit the request if BIO_RW_BARRIER
440 * causes ENOTSUPP, we allocate a spare bio...
442 struct bio
*bio
= bio_alloc(GFP_NOIO
, 1);
443 int rw
= (1<<BIO_RW
) | (1<<BIO_RW_SYNC
);
445 bio
->bi_bdev
= rdev
->bdev
;
446 bio
->bi_sector
= sector
;
447 bio_add_page(bio
, page
, size
, 0);
448 bio
->bi_private
= rdev
;
449 bio
->bi_end_io
= super_written
;
452 atomic_inc(&mddev
->pending_writes
);
453 if (!test_bit(BarriersNotsupp
, &rdev
->flags
)) {
455 rw
|= (1<<BIO_RW_BARRIER
);
456 rbio
= bio_clone(bio
, GFP_NOIO
);
457 rbio
->bi_private
= bio
;
458 rbio
->bi_end_io
= super_written_barrier
;
459 submit_bio(rw
, rbio
);
464 void md_super_wait(mddev_t
*mddev
)
466 /* wait for all superblock writes that were scheduled to complete.
467 * if any had to be retried (due to BARRIER problems), retry them
471 prepare_to_wait(&mddev
->sb_wait
, &wq
, TASK_UNINTERRUPTIBLE
);
472 if (atomic_read(&mddev
->pending_writes
)==0)
474 while (mddev
->biolist
) {
476 spin_lock_irq(&mddev
->write_lock
);
477 bio
= mddev
->biolist
;
478 mddev
->biolist
= bio
->bi_next
;
480 spin_unlock_irq(&mddev
->write_lock
);
481 submit_bio(bio
->bi_rw
, bio
);
485 finish_wait(&mddev
->sb_wait
, &wq
);
488 static void bi_complete(struct bio
*bio
, int error
)
490 complete((struct completion
*)bio
->bi_private
);
493 int sync_page_io(struct block_device
*bdev
, sector_t sector
, int size
,
494 struct page
*page
, int rw
)
496 struct bio
*bio
= bio_alloc(GFP_NOIO
, 1);
497 struct completion event
;
500 rw
|= (1 << BIO_RW_SYNC
);
503 bio
->bi_sector
= sector
;
504 bio_add_page(bio
, page
, size
, 0);
505 init_completion(&event
);
506 bio
->bi_private
= &event
;
507 bio
->bi_end_io
= bi_complete
;
509 wait_for_completion(&event
);
511 ret
= test_bit(BIO_UPTODATE
, &bio
->bi_flags
);
515 EXPORT_SYMBOL_GPL(sync_page_io
);
517 static int read_disk_sb(mdk_rdev_t
* rdev
, int size
)
519 char b
[BDEVNAME_SIZE
];
520 if (!rdev
->sb_page
) {
528 if (!sync_page_io(rdev
->bdev
, rdev
->sb_offset
<<1, size
, rdev
->sb_page
, READ
))
534 printk(KERN_WARNING
"md: disabled device %s, could not read superblock.\n",
535 bdevname(rdev
->bdev
,b
));
539 static int uuid_equal(mdp_super_t
*sb1
, mdp_super_t
*sb2
)
541 if ( (sb1
->set_uuid0
== sb2
->set_uuid0
) &&
542 (sb1
->set_uuid1
== sb2
->set_uuid1
) &&
543 (sb1
->set_uuid2
== sb2
->set_uuid2
) &&
544 (sb1
->set_uuid3
== sb2
->set_uuid3
))
552 static int sb_equal(mdp_super_t
*sb1
, mdp_super_t
*sb2
)
555 mdp_super_t
*tmp1
, *tmp2
;
557 tmp1
= kmalloc(sizeof(*tmp1
),GFP_KERNEL
);
558 tmp2
= kmalloc(sizeof(*tmp2
),GFP_KERNEL
);
560 if (!tmp1
|| !tmp2
) {
562 printk(KERN_INFO
"md.c: sb1 is not equal to sb2!\n");
570 * nr_disks is not constant
575 if (memcmp(tmp1
, tmp2
, MD_SB_GENERIC_CONSTANT_WORDS
* 4))
587 static u32
md_csum_fold(u32 csum
)
589 csum
= (csum
& 0xffff) + (csum
>> 16);
590 return (csum
& 0xffff) + (csum
>> 16);
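/*
 * For illustration: md_csum_fold() reduces a 32-bit sum to 16 bits by adding
 * the high and low halves twice, e.g. 0x0001ffff -> 0x00010000 -> 0x0001.
 * super_90_load() compares folded checksums so that superblocks written by
 * architectures whose csum_partial() differed still verify correctly.
 */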
593 static unsigned int calc_sb_csum(mdp_super_t
* sb
)
596 u32
*sb32
= (u32
*)sb
;
598 unsigned int disk_csum
, csum
;
600 disk_csum
= sb
->sb_csum
;
603 for (i
= 0; i
< MD_SB_BYTES
/4 ; i
++)
605 csum
= (newcsum
& 0xffffffff) + (newcsum
>>32);
609 /* This used to use csum_partial, which was wrong for several
610 * reasons including that different results are returned on
611 * different architectures. It isn't critical that we get exactly
612 * the same return value as before (we always csum_fold before
613 * testing, and that removes any differences). However as we
614 * know that csum_partial always returned a 16bit value on
615 * alphas, do a fold to maximise conformity to previous behaviour.
617 sb
->sb_csum
= md_csum_fold(disk_csum
);
619 sb
->sb_csum
= disk_csum
;
626 * Handle superblock details.
627 * We want to be able to handle multiple superblock formats
628 * so we have a common interface to them all, and an array of
629 * different handlers.
630 * We rely on user-space to write the initial superblock, and support
631 * reading and updating of superblocks.
632 * Interface methods are:
633 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
634 * loads and validates a superblock on dev.
635 * if refdev != NULL, compare superblocks on both devices
637 * 0 - dev has a superblock that is compatible with refdev
638 * 1 - dev has a superblock that is compatible and newer than refdev
639 * so dev should be used as the refdev in future
640 * -EINVAL superblock incompatible or invalid
641 * -othererror e.g. -EIO
643 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
644 * Verify that dev is acceptable into mddev.
645 * The first time, mddev->raid_disks will be 0, and data from
646 * dev should be merged in. Subsequent calls check that dev
647 * is new enough. Return 0 or -EINVAL
649 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
650 * Update the superblock for rdev with data in mddev
651 * This does not write to disc.
657 struct module
*owner
;
658 int (*load_super
)(mdk_rdev_t
*rdev
, mdk_rdev_t
*refdev
, int minor_version
);
659 int (*validate_super
)(mddev_t
*mddev
, mdk_rdev_t
*rdev
);
660 void (*sync_super
)(mddev_t
*mddev
, mdk_rdev_t
*rdev
);
664 * load_super for 0.90.0
666 static int super_90_load(mdk_rdev_t
*rdev
, mdk_rdev_t
*refdev
, int minor_version
)
668 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
674 * Calculate the position of the superblock,
675 * it's at the end of the disk.
677 * It also happens to be a multiple of 4Kb.
679 sb_offset
= calc_dev_sboffset(rdev
->bdev
);
680 rdev
->sb_offset
= sb_offset
;
682 ret
= read_disk_sb(rdev
, MD_SB_BYTES
);
687 bdevname(rdev
->bdev
, b
);
688 sb
= (mdp_super_t
*)page_address(rdev
->sb_page
);
690 if (sb
->md_magic
!= MD_SB_MAGIC
) {
691 printk(KERN_ERR
"md: invalid raid superblock magic on %s\n",
696 if (sb
->major_version
!= 0 ||
697 sb
->minor_version
< 90 ||
698 sb
->minor_version
> 91) {
699 printk(KERN_WARNING
"Bad version number %d.%d on %s\n",
700 sb
->major_version
, sb
->minor_version
,
705 if (sb
->raid_disks
<= 0)
708 if (md_csum_fold(calc_sb_csum(sb
)) != md_csum_fold(sb
->sb_csum
)) {
709 printk(KERN_WARNING
"md: invalid superblock checksum on %s\n",
714 rdev
->preferred_minor
= sb
->md_minor
;
715 rdev
->data_offset
= 0;
716 rdev
->sb_size
= MD_SB_BYTES
;
718 if (sb
->state
& (1<<MD_SB_BITMAP_PRESENT
)) {
719 if (sb
->level
!= 1 && sb
->level
!= 4
720 && sb
->level
!= 5 && sb
->level
!= 6
721 && sb
->level
!= 10) {
722 /* FIXME use a better test */
724 "md: bitmaps not supported for this level.\n");
729 if (sb
->level
== LEVEL_MULTIPATH
)
732 rdev
->desc_nr
= sb
->this_disk
.number
;
738 mdp_super_t
*refsb
= (mdp_super_t
*)page_address(refdev
->sb_page
);
739 if (!uuid_equal(refsb
, sb
)) {
740 printk(KERN_WARNING
"md: %s has different UUID to %s\n",
741 b
, bdevname(refdev
->bdev
,b2
));
744 if (!sb_equal(refsb
, sb
)) {
745 printk(KERN_WARNING
"md: %s has same UUID"
746 " but different superblock to %s\n",
747 b
, bdevname(refdev
->bdev
, b2
));
751 ev2
= md_event(refsb
);
757 rdev
->size
= calc_dev_size(rdev
, sb
->chunk_size
);
759 if (rdev
->size
< sb
->size
&& sb
->level
> 1)
760 /* "this cannot possibly happen" ... */
768 * validate_super for 0.90.0
770 static int super_90_validate(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
773 mdp_super_t
*sb
= (mdp_super_t
*)page_address(rdev
->sb_page
);
774 __u64 ev1
= md_event(sb
);
776 rdev
->raid_disk
= -1;
777 clear_bit(Faulty
, &rdev
->flags
);
778 clear_bit(In_sync
, &rdev
->flags
);
779 clear_bit(WriteMostly
, &rdev
->flags
);
780 clear_bit(BarriersNotsupp
, &rdev
->flags
);
782 if (mddev
->raid_disks
== 0) {
783 mddev
->major_version
= 0;
784 mddev
->minor_version
= sb
->minor_version
;
785 mddev
->patch_version
= sb
->patch_version
;
787 mddev
->chunk_size
= sb
->chunk_size
;
788 mddev
->ctime
= sb
->ctime
;
789 mddev
->utime
= sb
->utime
;
790 mddev
->level
= sb
->level
;
791 mddev
->clevel
[0] = 0;
792 mddev
->layout
= sb
->layout
;
793 mddev
->raid_disks
= sb
->raid_disks
;
794 mddev
->size
= sb
->size
;
796 mddev
->bitmap_offset
= 0;
797 mddev
->default_bitmap_offset
= MD_SB_BYTES
>> 9;
799 if (mddev
->minor_version
>= 91) {
800 mddev
->reshape_position
= sb
->reshape_position
;
801 mddev
->delta_disks
= sb
->delta_disks
;
802 mddev
->new_level
= sb
->new_level
;
803 mddev
->new_layout
= sb
->new_layout
;
804 mddev
->new_chunk
= sb
->new_chunk
;
806 mddev
->reshape_position
= MaxSector
;
807 mddev
->delta_disks
= 0;
808 mddev
->new_level
= mddev
->level
;
809 mddev
->new_layout
= mddev
->layout
;
810 mddev
->new_chunk
= mddev
->chunk_size
;
813 if (sb
->state
& (1<<MD_SB_CLEAN
))
814 mddev
->recovery_cp
= MaxSector
;
816 if (sb
->events_hi
== sb
->cp_events_hi
&&
817 sb
->events_lo
== sb
->cp_events_lo
) {
818 mddev
->recovery_cp
= sb
->recovery_cp
;
820 mddev
->recovery_cp
= 0;
823 memcpy(mddev
->uuid
+0, &sb
->set_uuid0
, 4);
824 memcpy(mddev
->uuid
+4, &sb
->set_uuid1
, 4);
825 memcpy(mddev
->uuid
+8, &sb
->set_uuid2
, 4);
826 memcpy(mddev
->uuid
+12,&sb
->set_uuid3
, 4);
828 mddev
->max_disks
= MD_SB_DISKS
;
830 if (sb
->state
& (1<<MD_SB_BITMAP_PRESENT
) &&
831 mddev
->bitmap_file
== NULL
)
832 mddev
->bitmap_offset
= mddev
->default_bitmap_offset
;
834 } else if (mddev
->pers
== NULL
) {
835 /* Insist on good event counter while assembling */
837 if (ev1
< mddev
->events
)
839 } else if (mddev
->bitmap
) {
840 /* if adding to array with a bitmap, then we can accept an
841 * older device ... but not too old.
843 if (ev1
< mddev
->bitmap
->events_cleared
)
846 if (ev1
< mddev
->events
)
847 /* just a hot-add of a new device, leave raid_disk at -1 */
851 if (mddev
->level
!= LEVEL_MULTIPATH
) {
852 desc
= sb
->disks
+ rdev
->desc_nr
;
854 if (desc
->state
& (1<<MD_DISK_FAULTY
))
855 set_bit(Faulty
, &rdev
->flags
);
856 else if (desc
->state
& (1<<MD_DISK_SYNC
) /* &&
857 desc->raid_disk < mddev->raid_disks */) {
858 set_bit(In_sync
, &rdev
->flags
);
859 rdev
->raid_disk
= desc
->raid_disk
;
861 if (desc
->state
& (1<<MD_DISK_WRITEMOSTLY
))
862 set_bit(WriteMostly
, &rdev
->flags
);
863 } else /* MULTIPATH are always insync */
864 set_bit(In_sync
, &rdev
->flags
);
869 * sync_super for 0.90.0
871 static void super_90_sync(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
874 struct list_head
*tmp
;
876 int next_spare
= mddev
->raid_disks
;
879 /* make rdev->sb match mddev data..
882 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
883 * 3/ any empty disks < next_spare become removed
885 * disks[0] gets initialised to REMOVED because
886 * we cannot be sure from other fields if it has
887 * been initialised or not.
890 int active
=0, working
=0,failed
=0,spare
=0,nr_disks
=0;
892 rdev
->sb_size
= MD_SB_BYTES
;
894 sb
= (mdp_super_t
*)page_address(rdev
->sb_page
);
896 memset(sb
, 0, sizeof(*sb
));
898 sb
->md_magic
= MD_SB_MAGIC
;
899 sb
->major_version
= mddev
->major_version
;
900 sb
->patch_version
= mddev
->patch_version
;
901 sb
->gvalid_words
= 0; /* ignored */
902 memcpy(&sb
->set_uuid0
, mddev
->uuid
+0, 4);
903 memcpy(&sb
->set_uuid1
, mddev
->uuid
+4, 4);
904 memcpy(&sb
->set_uuid2
, mddev
->uuid
+8, 4);
905 memcpy(&sb
->set_uuid3
, mddev
->uuid
+12,4);
907 sb
->ctime
= mddev
->ctime
;
908 sb
->level
= mddev
->level
;
909 sb
->size
= mddev
->size
;
910 sb
->raid_disks
= mddev
->raid_disks
;
911 sb
->md_minor
= mddev
->md_minor
;
912 sb
->not_persistent
= 0;
913 sb
->utime
= mddev
->utime
;
915 sb
->events_hi
= (mddev
->events
>>32);
916 sb
->events_lo
= (u32
)mddev
->events
;
918 if (mddev
->reshape_position
== MaxSector
)
919 sb
->minor_version
= 90;
921 sb
->minor_version
= 91;
922 sb
->reshape_position
= mddev
->reshape_position
;
923 sb
->new_level
= mddev
->new_level
;
924 sb
->delta_disks
= mddev
->delta_disks
;
925 sb
->new_layout
= mddev
->new_layout
;
926 sb
->new_chunk
= mddev
->new_chunk
;
928 mddev
->minor_version
= sb
->minor_version
;
931 sb
->recovery_cp
= mddev
->recovery_cp
;
932 sb
->cp_events_hi
= (mddev
->events
>>32);
933 sb
->cp_events_lo
= (u32
)mddev
->events
;
934 if (mddev
->recovery_cp
== MaxSector
)
935 sb
->state
= (1<< MD_SB_CLEAN
);
939 sb
->layout
= mddev
->layout
;
940 sb
->chunk_size
= mddev
->chunk_size
;
942 if (mddev
->bitmap
&& mddev
->bitmap_file
== NULL
)
943 sb
->state
|= (1<<MD_SB_BITMAP_PRESENT
);
945 sb
->disks
[0].state
= (1<<MD_DISK_REMOVED
);
946 rdev_for_each(rdev2
, tmp
, mddev
) {
949 if (rdev2
->raid_disk
>= 0 && test_bit(In_sync
, &rdev2
->flags
)
950 && !test_bit(Faulty
, &rdev2
->flags
))
951 desc_nr
= rdev2
->raid_disk
;
953 desc_nr
= next_spare
++;
954 rdev2
->desc_nr
= desc_nr
;
955 d
= &sb
->disks
[rdev2
->desc_nr
];
957 d
->number
= rdev2
->desc_nr
;
958 d
->major
= MAJOR(rdev2
->bdev
->bd_dev
);
959 d
->minor
= MINOR(rdev2
->bdev
->bd_dev
);
960 if (rdev2
->raid_disk
>= 0 && test_bit(In_sync
, &rdev2
->flags
)
961 && !test_bit(Faulty
, &rdev2
->flags
))
962 d
->raid_disk
= rdev2
->raid_disk
;
964 d
->raid_disk
= rdev2
->desc_nr
; /* compatibility */
965 if (test_bit(Faulty
, &rdev2
->flags
))
966 d
->state
= (1<<MD_DISK_FAULTY
);
967 else if (test_bit(In_sync
, &rdev2
->flags
)) {
968 d
->state
= (1<<MD_DISK_ACTIVE
);
969 d
->state
|= (1<<MD_DISK_SYNC
);
977 if (test_bit(WriteMostly
, &rdev2
->flags
))
978 d
->state
|= (1<<MD_DISK_WRITEMOSTLY
);
980 /* now set the "removed" and "faulty" bits on any missing devices */
981 for (i
=0 ; i
< mddev
->raid_disks
; i
++) {
982 mdp_disk_t
*d
= &sb
->disks
[i
];
983 if (d
->state
== 0 && d
->number
== 0) {
986 d
->state
= (1<<MD_DISK_REMOVED
);
987 d
->state
|= (1<<MD_DISK_FAULTY
);
991 sb
->nr_disks
= nr_disks
;
992 sb
->active_disks
= active
;
993 sb
->working_disks
= working
;
994 sb
->failed_disks
= failed
;
995 sb
->spare_disks
= spare
;
997 sb
->this_disk
= sb
->disks
[rdev
->desc_nr
];
998 sb
->sb_csum
= calc_sb_csum(sb
);
1002 * version 1 superblock
1005 static __le32
calc_sb_1_csum(struct mdp_superblock_1
* sb
)
1009 unsigned long long newcsum
;
1010 int size
= 256 + le32_to_cpu(sb
->max_dev
)*2;
1011 __le32
*isuper
= (__le32
*)sb
;
1014 disk_csum
= sb
->sb_csum
;
1017 for (i
=0; size
>=4; size
-= 4 )
1018 newcsum
+= le32_to_cpu(*isuper
++);
1021 newcsum
+= le16_to_cpu(*(__le16
*) isuper
);
1023 csum
= (newcsum
& 0xffffffff) + (newcsum
>> 32);
1024 sb
->sb_csum
= disk_csum
;
1025 return cpu_to_le32(csum
);
1028 static int super_1_load(mdk_rdev_t
*rdev
, mdk_rdev_t
*refdev
, int minor_version
)
1030 struct mdp_superblock_1
*sb
;
1033 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
1044 switch(minor_version
) {
1046 sb_offset
= rdev
->bdev
->bd_inode
->i_size
>> 9;
1048 sb_offset
&= ~(sector_t
)(4*2-1);
1049 /* convert from sectors to K */
1061 rdev
->sb_offset
= sb_offset
;
1063 /* superblock is rarely larger than 1K, but it can be larger,
1064 * and it is safe to read 4k, so we do that
1066 ret
= read_disk_sb(rdev
, 4096);
1067 if (ret
) return ret
;
1070 sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1072 if (sb
->magic
!= cpu_to_le32(MD_SB_MAGIC
) ||
1073 sb
->major_version
!= cpu_to_le32(1) ||
1074 le32_to_cpu(sb
->max_dev
) > (4096-256)/2 ||
1075 le64_to_cpu(sb
->super_offset
) != (rdev
->sb_offset
<<1) ||
1076 (le32_to_cpu(sb
->feature_map
) & ~MD_FEATURE_ALL
) != 0)
1079 if (calc_sb_1_csum(sb
) != sb
->sb_csum
) {
1080 printk("md: invalid superblock checksum on %s\n",
1081 bdevname(rdev
->bdev
,b
));
1084 if (le64_to_cpu(sb
->data_size
) < 10) {
1085 printk("md: data_size too small on %s\n",
1086 bdevname(rdev
->bdev
,b
));
1089 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_BITMAP_OFFSET
)) {
1090 if (sb
->level
!= cpu_to_le32(1) &&
1091 sb
->level
!= cpu_to_le32(4) &&
1092 sb
->level
!= cpu_to_le32(5) &&
1093 sb
->level
!= cpu_to_le32(6) &&
1094 sb
->level
!= cpu_to_le32(10)) {
1096 "md: bitmaps not supported for this level.\n");
1101 rdev
->preferred_minor
= 0xffff;
1102 rdev
->data_offset
= le64_to_cpu(sb
->data_offset
);
1103 atomic_set(&rdev
->corrected_errors
, le32_to_cpu(sb
->cnt_corrected_read
));
1105 rdev
->sb_size
= le32_to_cpu(sb
->max_dev
) * 2 + 256;
1106 bmask
= queue_hardsect_size(rdev
->bdev
->bd_disk
->queue
)-1;
1107 if (rdev
->sb_size
& bmask
)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_offset + (rdev->sb_size/512))
		return -EINVAL;
1118 if (sb
->level
== cpu_to_le32(LEVEL_MULTIPATH
))
1121 rdev
->desc_nr
= le32_to_cpu(sb
->dev_number
);
1127 struct mdp_superblock_1
*refsb
=
1128 (struct mdp_superblock_1
*)page_address(refdev
->sb_page
);
1130 if (memcmp(sb
->set_uuid
, refsb
->set_uuid
, 16) != 0 ||
1131 sb
->level
!= refsb
->level
||
1132 sb
->layout
!= refsb
->layout
||
1133 sb
->chunksize
!= refsb
->chunksize
) {
1134 printk(KERN_WARNING
"md: %s has strangely different"
1135 " superblock to %s\n",
1136 bdevname(rdev
->bdev
,b
),
1137 bdevname(refdev
->bdev
,b2
));
1140 ev1
= le64_to_cpu(sb
->events
);
1141 ev2
= le64_to_cpu(refsb
->events
);
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
1156 if (rdev
->size
< le64_to_cpu(sb
->data_size
)/2)
1158 rdev
->size
= le64_to_cpu(sb
->data_size
)/2;
1159 if (le32_to_cpu(sb
->chunksize
))
1160 rdev
->size
&= ~((sector_t
)le32_to_cpu(sb
->chunksize
)/2 - 1);
1162 if (le64_to_cpu(sb
->size
) > rdev
->size
*2)
1167 static int super_1_validate(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1169 struct mdp_superblock_1
*sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1170 __u64 ev1
= le64_to_cpu(sb
->events
);
1172 rdev
->raid_disk
= -1;
1173 clear_bit(Faulty
, &rdev
->flags
);
1174 clear_bit(In_sync
, &rdev
->flags
);
1175 clear_bit(WriteMostly
, &rdev
->flags
);
1176 clear_bit(BarriersNotsupp
, &rdev
->flags
);
1178 if (mddev
->raid_disks
== 0) {
1179 mddev
->major_version
= 1;
1180 mddev
->patch_version
= 0;
1181 mddev
->external
= 0;
1182 mddev
->chunk_size
= le32_to_cpu(sb
->chunksize
) << 9;
1183 mddev
->ctime
= le64_to_cpu(sb
->ctime
) & ((1ULL << 32)-1);
1184 mddev
->utime
= le64_to_cpu(sb
->utime
) & ((1ULL << 32)-1);
1185 mddev
->level
= le32_to_cpu(sb
->level
);
1186 mddev
->clevel
[0] = 0;
1187 mddev
->layout
= le32_to_cpu(sb
->layout
);
1188 mddev
->raid_disks
= le32_to_cpu(sb
->raid_disks
);
1189 mddev
->size
= le64_to_cpu(sb
->size
)/2;
1190 mddev
->events
= ev1
;
1191 mddev
->bitmap_offset
= 0;
1192 mddev
->default_bitmap_offset
= 1024 >> 9;
1194 mddev
->recovery_cp
= le64_to_cpu(sb
->resync_offset
);
1195 memcpy(mddev
->uuid
, sb
->set_uuid
, 16);
1197 mddev
->max_disks
= (4096-256)/2;
1199 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_BITMAP_OFFSET
) &&
1200 mddev
->bitmap_file
== NULL
)
1201 mddev
->bitmap_offset
= (__s32
)le32_to_cpu(sb
->bitmap_offset
);
1203 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_RESHAPE_ACTIVE
)) {
1204 mddev
->reshape_position
= le64_to_cpu(sb
->reshape_position
);
1205 mddev
->delta_disks
= le32_to_cpu(sb
->delta_disks
);
1206 mddev
->new_level
= le32_to_cpu(sb
->new_level
);
1207 mddev
->new_layout
= le32_to_cpu(sb
->new_layout
);
1208 mddev
->new_chunk
= le32_to_cpu(sb
->new_chunk
)<<9;
1210 mddev
->reshape_position
= MaxSector
;
1211 mddev
->delta_disks
= 0;
1212 mddev
->new_level
= mddev
->level
;
1213 mddev
->new_layout
= mddev
->layout
;
1214 mddev
->new_chunk
= mddev
->chunk_size
;
1217 } else if (mddev
->pers
== NULL
) {
		/* Insist on good event counter while assembling */
1220 if (ev1
< mddev
->events
)
1222 } else if (mddev
->bitmap
) {
1223 /* If adding to array with a bitmap, then we can accept an
1224 * older device, but not too old.
1226 if (ev1
< mddev
->bitmap
->events_cleared
)
1229 if (ev1
< mddev
->events
)
1230 /* just a hot-add of a new device, leave raid_disk at -1 */
1233 if (mddev
->level
!= LEVEL_MULTIPATH
) {
1235 role
= le16_to_cpu(sb
->dev_roles
[rdev
->desc_nr
]);
1237 case 0xffff: /* spare */
1239 case 0xfffe: /* faulty */
1240 set_bit(Faulty
, &rdev
->flags
);
1243 if ((le32_to_cpu(sb
->feature_map
) &
1244 MD_FEATURE_RECOVERY_OFFSET
))
1245 rdev
->recovery_offset
= le64_to_cpu(sb
->recovery_offset
);
1247 set_bit(In_sync
, &rdev
->flags
);
1248 rdev
->raid_disk
= role
;
1251 if (sb
->devflags
& WriteMostly1
)
1252 set_bit(WriteMostly
, &rdev
->flags
);
1253 } else /* MULTIPATH are always insync */
1254 set_bit(In_sync
, &rdev
->flags
);
1259 static void super_1_sync(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1261 struct mdp_superblock_1
*sb
;
1262 struct list_head
*tmp
;
1265 /* make rdev->sb match mddev and rdev data. */
1267 sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1269 sb
->feature_map
= 0;
1271 sb
->recovery_offset
= cpu_to_le64(0);
1272 memset(sb
->pad1
, 0, sizeof(sb
->pad1
));
1273 memset(sb
->pad2
, 0, sizeof(sb
->pad2
));
1274 memset(sb
->pad3
, 0, sizeof(sb
->pad3
));
1276 sb
->utime
= cpu_to_le64((__u64
)mddev
->utime
);
1277 sb
->events
= cpu_to_le64(mddev
->events
);
1279 sb
->resync_offset
= cpu_to_le64(mddev
->recovery_cp
);
1281 sb
->resync_offset
= cpu_to_le64(0);
1283 sb
->cnt_corrected_read
= cpu_to_le32(atomic_read(&rdev
->corrected_errors
));
1285 sb
->raid_disks
= cpu_to_le32(mddev
->raid_disks
);
1286 sb
->size
= cpu_to_le64(mddev
->size
<<1);
1288 if (mddev
->bitmap
&& mddev
->bitmap_file
== NULL
) {
1289 sb
->bitmap_offset
= cpu_to_le32((__u32
)mddev
->bitmap_offset
);
1290 sb
->feature_map
= cpu_to_le32(MD_FEATURE_BITMAP_OFFSET
);
1293 if (rdev
->raid_disk
>= 0 &&
1294 !test_bit(In_sync
, &rdev
->flags
) &&
1295 rdev
->recovery_offset
> 0) {
1296 sb
->feature_map
|= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET
);
1297 sb
->recovery_offset
= cpu_to_le64(rdev
->recovery_offset
);
1300 if (mddev
->reshape_position
!= MaxSector
) {
1301 sb
->feature_map
|= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE
);
1302 sb
->reshape_position
= cpu_to_le64(mddev
->reshape_position
);
1303 sb
->new_layout
= cpu_to_le32(mddev
->new_layout
);
1304 sb
->delta_disks
= cpu_to_le32(mddev
->delta_disks
);
1305 sb
->new_level
= cpu_to_le32(mddev
->new_level
);
1306 sb
->new_chunk
= cpu_to_le32(mddev
->new_chunk
>>9);
1310 rdev_for_each(rdev2
, tmp
, mddev
)
1311 if (rdev2
->desc_nr
+1 > max_dev
)
1312 max_dev
= rdev2
->desc_nr
+1;
1314 if (max_dev
> le32_to_cpu(sb
->max_dev
))
1315 sb
->max_dev
= cpu_to_le32(max_dev
);
1316 for (i
=0; i
<max_dev
;i
++)
1317 sb
->dev_roles
[i
] = cpu_to_le16(0xfffe);
1319 rdev_for_each(rdev2
, tmp
, mddev
) {
1321 if (test_bit(Faulty
, &rdev2
->flags
))
1322 sb
->dev_roles
[i
] = cpu_to_le16(0xfffe);
1323 else if (test_bit(In_sync
, &rdev2
->flags
))
1324 sb
->dev_roles
[i
] = cpu_to_le16(rdev2
->raid_disk
);
1325 else if (rdev2
->raid_disk
>= 0 && rdev2
->recovery_offset
> 0)
1326 sb
->dev_roles
[i
] = cpu_to_le16(rdev2
->raid_disk
);
1328 sb
->dev_roles
[i
] = cpu_to_le16(0xffff);
1331 sb
->sb_csum
= calc_sb_1_csum(sb
);
1335 static struct super_type super_types
[] = {
1338 .owner
= THIS_MODULE
,
1339 .load_super
= super_90_load
,
1340 .validate_super
= super_90_validate
,
1341 .sync_super
= super_90_sync
,
1345 .owner
= THIS_MODULE
,
1346 .load_super
= super_1_load
,
1347 .validate_super
= super_1_validate
,
1348 .sync_super
= super_1_sync
,
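/*
 * The table above is indexed by the metadata major version; callers in the
 * rest of this file dispatch through it, for example (as sync_sbs() and
 * analyze_sbs() do below):
 *
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 */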
1352 static int match_mddev_units(mddev_t
*mddev1
, mddev_t
*mddev2
)
1354 struct list_head
*tmp
, *tmp2
;
1355 mdk_rdev_t
*rdev
, *rdev2
;
1357 rdev_for_each(rdev
, tmp
, mddev1
)
1358 rdev_for_each(rdev2
, tmp2
, mddev2
)
1359 if (rdev
->bdev
->bd_contains
==
1360 rdev2
->bdev
->bd_contains
)
1366 static LIST_HEAD(pending_raid_disks
);
1368 static int bind_rdev_to_array(mdk_rdev_t
* rdev
, mddev_t
* mddev
)
1370 char b
[BDEVNAME_SIZE
];
1379 /* make sure rdev->size exceeds mddev->size */
1380 if (rdev
->size
&& (mddev
->size
== 0 || rdev
->size
< mddev
->size
)) {
1382 /* Cannot change size, so fail
1383 * If mddev->level <= 0, then we don't care
1384 * about aligning sizes (e.g. linear)
1386 if (mddev
->level
> 0)
1389 mddev
->size
= rdev
->size
;
1392 /* Verify rdev->desc_nr is unique.
1393 * If it is -1, assign a free number, else
1394 * check number is not in use
1396 if (rdev
->desc_nr
< 0) {
1398 if (mddev
->pers
) choice
= mddev
->raid_disks
;
1399 while (find_rdev_nr(mddev
, choice
))
1401 rdev
->desc_nr
= choice
;
1403 if (find_rdev_nr(mddev
, rdev
->desc_nr
))
1406 bdevname(rdev
->bdev
,b
);
1407 while ( (s
=strchr(b
, '/')) != NULL
)
1410 rdev
->mddev
= mddev
;
1411 printk(KERN_INFO
"md: bind<%s>\n", b
);
1413 if ((err
= kobject_add(&rdev
->kobj
, &mddev
->kobj
, "dev-%s", b
)))
1416 if (rdev
->bdev
->bd_part
)
1417 ko
= &rdev
->bdev
->bd_part
->dev
.kobj
;
1419 ko
= &rdev
->bdev
->bd_disk
->dev
.kobj
;
1420 if ((err
= sysfs_create_link(&rdev
->kobj
, ko
, "block"))) {
1421 kobject_del(&rdev
->kobj
);
1424 list_add(&rdev
->same_set
, &mddev
->disks
);
1425 bd_claim_by_disk(rdev
->bdev
, rdev
->bdev
->bd_holder
, mddev
->gendisk
);
1429 printk(KERN_WARNING
"md: failed to register dev-%s for %s\n",
1434 static void md_delayed_delete(struct work_struct
*ws
)
1436 mdk_rdev_t
*rdev
= container_of(ws
, mdk_rdev_t
, del_work
);
1437 kobject_del(&rdev
->kobj
);
1438 kobject_put(&rdev
->kobj
);
1441 static void unbind_rdev_from_array(mdk_rdev_t
* rdev
)
1443 char b
[BDEVNAME_SIZE
];
1448 bd_release_from_disk(rdev
->bdev
, rdev
->mddev
->gendisk
);
1449 list_del_init(&rdev
->same_set
);
1450 printk(KERN_INFO
"md: unbind<%s>\n", bdevname(rdev
->bdev
,b
));
1452 sysfs_remove_link(&rdev
->kobj
, "block");
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state"
	 */
1457 INIT_WORK(&rdev
->del_work
, md_delayed_delete
);
1458 kobject_get(&rdev
->kobj
);
1459 schedule_work(&rdev
->del_work
);
1463 * prevent the device from being mounted, repartitioned or
1464 * otherwise reused by a RAID array (or any other kernel
1465 * subsystem), by bd_claiming the device.
1467 static int lock_rdev(mdk_rdev_t
*rdev
, dev_t dev
, int shared
)
1470 struct block_device
*bdev
;
1471 char b
[BDEVNAME_SIZE
];
1473 bdev
= open_by_devnum(dev
, FMODE_READ
|FMODE_WRITE
);
1475 printk(KERN_ERR
"md: could not open %s.\n",
1476 __bdevname(dev
, b
));
1477 return PTR_ERR(bdev
);
1479 err
= bd_claim(bdev
, shared
? (mdk_rdev_t
*)lock_rdev
: rdev
);
1481 printk(KERN_ERR
"md: could not bd_claim %s.\n",
1487 set_bit(AllReserved
, &rdev
->flags
);
1492 static void unlock_rdev(mdk_rdev_t
*rdev
)
1494 struct block_device
*bdev
= rdev
->bdev
;
1502 void md_autodetect_dev(dev_t dev
);
1504 static void export_rdev(mdk_rdev_t
* rdev
)
1506 char b
[BDEVNAME_SIZE
];
1507 printk(KERN_INFO
"md: export_rdev(%s)\n",
1508 bdevname(rdev
->bdev
,b
));
1512 list_del_init(&rdev
->same_set
);
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
1522 kobject_put(&rdev
->kobj
);
1525 static void kick_rdev_from_array(mdk_rdev_t
* rdev
)
1527 unbind_rdev_from_array(rdev
);
1531 static void export_array(mddev_t
*mddev
)
1533 struct list_head
*tmp
;
1536 rdev_for_each(rdev
, tmp
, mddev
) {
1541 kick_rdev_from_array(rdev
);
1543 if (!list_empty(&mddev
->disks
))
1545 mddev
->raid_disks
= 0;
1546 mddev
->major_version
= 0;
1549 static void print_desc(mdp_disk_t
*desc
)
1551 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc
->number
,
1552 desc
->major
,desc
->minor
,desc
->raid_disk
,desc
->state
);
1555 static void print_sb(mdp_super_t
*sb
)
1560 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1561 sb
->major_version
, sb
->minor_version
, sb
->patch_version
,
1562 sb
->set_uuid0
, sb
->set_uuid1
, sb
->set_uuid2
, sb
->set_uuid3
,
1564 printk(KERN_INFO
"md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1565 sb
->level
, sb
->size
, sb
->nr_disks
, sb
->raid_disks
,
1566 sb
->md_minor
, sb
->layout
, sb
->chunk_size
);
1567 printk(KERN_INFO
"md: UT:%08x ST:%d AD:%d WD:%d"
1568 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1569 sb
->utime
, sb
->state
, sb
->active_disks
, sb
->working_disks
,
1570 sb
->failed_disks
, sb
->spare_disks
,
1571 sb
->sb_csum
, (unsigned long)sb
->events_lo
);
1574 for (i
= 0; i
< MD_SB_DISKS
; i
++) {
1577 desc
= sb
->disks
+ i
;
1578 if (desc
->number
|| desc
->major
|| desc
->minor
||
1579 desc
->raid_disk
|| (desc
->state
&& (desc
->state
!= 4))) {
1580 printk(" D %2d: ", i
);
1584 printk(KERN_INFO
"md: THIS: ");
1585 print_desc(&sb
->this_disk
);
1589 static void print_rdev(mdk_rdev_t
*rdev
)
1591 char b
[BDEVNAME_SIZE
];
1592 printk(KERN_INFO
"md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1593 bdevname(rdev
->bdev
,b
), (unsigned long long)rdev
->size
,
1594 test_bit(Faulty
, &rdev
->flags
), test_bit(In_sync
, &rdev
->flags
),
1596 if (rdev
->sb_loaded
) {
1597 printk(KERN_INFO
"md: rdev superblock:\n");
1598 print_sb((mdp_super_t
*)page_address(rdev
->sb_page
));
1600 printk(KERN_INFO
"md: no rdev superblock!\n");
1603 static void md_print_devices(void)
1605 struct list_head
*tmp
, *tmp2
;
1608 char b
[BDEVNAME_SIZE
];
1611 printk("md: **********************************\n");
1612 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1613 printk("md: **********************************\n");
1614 for_each_mddev(mddev
, tmp
) {
1617 bitmap_print_sb(mddev
->bitmap
);
1619 printk("%s: ", mdname(mddev
));
1620 rdev_for_each(rdev
, tmp2
, mddev
)
1621 printk("<%s>", bdevname(rdev
->bdev
,b
));
1624 rdev_for_each(rdev
, tmp2
, mddev
)
1627 printk("md: **********************************\n");
1632 static void sync_sbs(mddev_t
* mddev
, int nospares
)
1634 /* Update each superblock (in-memory image), but
1635 * if we are allowed to, skip spares which already
1636 * have the right event counter, or have one earlier
1637 * (which would mean they aren't being marked as dirty
1638 * with the rest of the array)
1641 struct list_head
*tmp
;
1643 rdev_for_each(rdev
, tmp
, mddev
) {
1644 if (rdev
->sb_events
== mddev
->events
||
1646 rdev
->raid_disk
< 0 &&
1647 (rdev
->sb_events
&1)==0 &&
1648 rdev
->sb_events
+1 == mddev
->events
)) {
1649 /* Don't update this superblock */
1650 rdev
->sb_loaded
= 2;
1652 super_types
[mddev
->major_version
].
1653 sync_super(mddev
, rdev
);
1654 rdev
->sb_loaded
= 1;
1659 static void md_update_sb(mddev_t
* mddev
, int force_change
)
1661 struct list_head
*tmp
;
1667 spin_lock_irq(&mddev
->write_lock
);
1669 set_bit(MD_CHANGE_PENDING
, &mddev
->flags
);
1670 if (test_and_clear_bit(MD_CHANGE_DEVS
, &mddev
->flags
))
1672 if (test_and_clear_bit(MD_CHANGE_CLEAN
, &mddev
->flags
))
1673 /* just a clean<-> dirty transition, possibly leave spares alone,
1674 * though if events isn't the right even/odd, we will have to do
1680 if (mddev
->degraded
)
1681 /* If the array is degraded, then skipping spares is both
1682 * dangerous and fairly pointless.
1683 * Dangerous because a device that was removed from the array
1684 * might have a event_count that still looks up-to-date,
1685 * so it can be re-added without a resync.
1686 * Pointless because if there are any spares to skip,
1687 * then a recovery will happen and soon that array won't
1688 * be degraded any more and the spare can go back to sleep then.
1692 sync_req
= mddev
->in_sync
;
1693 mddev
->utime
= get_seconds();
1695 /* If this is just a dirty<->clean transition, and the array is clean
1696 * and 'events' is odd, we can roll back to the previous clean state */
1698 && (mddev
->in_sync
&& mddev
->recovery_cp
== MaxSector
)
1699 && (mddev
->events
& 1)
1700 && mddev
->events
!= 1)
1703 /* otherwise we have to go forward and ... */
1705 if (!mddev
->in_sync
|| mddev
->recovery_cp
!= MaxSector
) { /* not clean */
1706 /* .. if the array isn't clean, insist on an odd 'events' */
1707 if ((mddev
->events
&1)==0) {
1712 /* otherwise insist on an even 'events' (for clean states) */
1713 if ((mddev
->events
&1)) {
1720 if (!mddev
->events
) {
1722 * oops, this 64-bit counter should never wrap.
1723 * Either we are in around ~1 trillion A.C., assuming
1724 * 1 reboot per second, or we have a bug:
1731 * do not write anything to disk if using
1732 * nonpersistent superblocks
1734 if (!mddev
->persistent
) {
1735 if (!mddev
->external
)
1736 clear_bit(MD_CHANGE_PENDING
, &mddev
->flags
);
1738 spin_unlock_irq(&mddev
->write_lock
);
1739 wake_up(&mddev
->sb_wait
);
1742 sync_sbs(mddev
, nospares
);
1743 spin_unlock_irq(&mddev
->write_lock
);
1746 "md: updating %s RAID superblock on device (in sync %d)\n",
1747 mdname(mddev
),mddev
->in_sync
);
1749 bitmap_update_sb(mddev
->bitmap
);
1750 rdev_for_each(rdev
, tmp
, mddev
) {
1751 char b
[BDEVNAME_SIZE
];
1752 dprintk(KERN_INFO
"md: ");
1753 if (rdev
->sb_loaded
!= 1)
1754 continue; /* no noise on spare devices */
1755 if (test_bit(Faulty
, &rdev
->flags
))
1756 dprintk("(skipping faulty ");
1758 dprintk("%s ", bdevname(rdev
->bdev
,b
));
1759 if (!test_bit(Faulty
, &rdev
->flags
)) {
1760 md_super_write(mddev
,rdev
,
1761 rdev
->sb_offset
<<1, rdev
->sb_size
,
1763 dprintk(KERN_INFO
"(write) %s's sb offset: %llu\n",
1764 bdevname(rdev
->bdev
,b
),
1765 (unsigned long long)rdev
->sb_offset
);
1766 rdev
->sb_events
= mddev
->events
;
1770 if (mddev
->level
== LEVEL_MULTIPATH
)
1771 /* only need to write one superblock... */
1774 md_super_wait(mddev
);
1775 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1777 spin_lock_irq(&mddev
->write_lock
);
1778 if (mddev
->in_sync
!= sync_req
||
1779 test_bit(MD_CHANGE_DEVS
, &mddev
->flags
)) {
1780 /* have to write it out again */
1781 spin_unlock_irq(&mddev
->write_lock
);
1784 clear_bit(MD_CHANGE_PENDING
, &mddev
->flags
);
1785 spin_unlock_irq(&mddev
->write_lock
);
1786 wake_up(&mddev
->sb_wait
);
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
1793 static int cmd_match(const char *cmd
, const char *str
)
1795 /* See if cmd, written into a sysfs file, matches
1796 * str. They must either be the same, or cmd can
1797 * have a trailing newline
1799 while (*cmd
&& *str
&& *cmd
== *str
) {
1810 struct rdev_sysfs_entry
{
1811 struct attribute attr
;
1812 ssize_t (*show
)(mdk_rdev_t
*, char *);
1813 ssize_t (*store
)(mdk_rdev_t
*, const char *, size_t);
1817 state_show(mdk_rdev_t
*rdev
, char *page
)
1822 if (test_bit(Faulty
, &rdev
->flags
)) {
1823 len
+= sprintf(page
+len
, "%sfaulty",sep
);
1826 if (test_bit(In_sync
, &rdev
->flags
)) {
1827 len
+= sprintf(page
+len
, "%sin_sync",sep
);
1830 if (test_bit(WriteMostly
, &rdev
->flags
)) {
1831 len
+= sprintf(page
+len
, "%swrite_mostly",sep
);
1834 if (!test_bit(Faulty
, &rdev
->flags
) &&
1835 !test_bit(In_sync
, &rdev
->flags
)) {
1836 len
+= sprintf(page
+len
, "%sspare", sep
);
1839 return len
+sprintf(page
+len
, "\n");
1843 state_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
	 *  faulty - simulates an error
1847 * remove - disconnects the device
1848 * writemostly - sets write_mostly
1849 * -writemostly - clears write_mostly
1852 if (cmd_match(buf
, "faulty") && rdev
->mddev
->pers
) {
1853 md_error(rdev
->mddev
, rdev
);
1855 } else if (cmd_match(buf
, "remove")) {
1856 if (rdev
->raid_disk
>= 0)
1859 mddev_t
*mddev
= rdev
->mddev
;
1860 kick_rdev_from_array(rdev
);
1862 md_update_sb(mddev
, 1);
1863 md_new_event(mddev
);
1866 } else if (cmd_match(buf
, "writemostly")) {
1867 set_bit(WriteMostly
, &rdev
->flags
);
1869 } else if (cmd_match(buf
, "-writemostly")) {
1870 clear_bit(WriteMostly
, &rdev
->flags
);
1873 return err
? err
: len
;
1875 static struct rdev_sysfs_entry rdev_state
=
1876 __ATTR(state
, S_IRUGO
|S_IWUSR
, state_show
, state_store
);
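/*
 * Example (illustrative; the device names are made up): user space drives
 * state_store() above with writes such as
 *
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 *
 * which arrives here as buf == "faulty\n"; cmd_match() accepts the command
 * with or without the trailing newline.
 */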
1879 super_show(mdk_rdev_t
*rdev
, char *page
)
1881 if (rdev
->sb_loaded
&& rdev
->sb_size
) {
1882 memcpy(page
, page_address(rdev
->sb_page
), rdev
->sb_size
);
1883 return rdev
->sb_size
;
1887 static struct rdev_sysfs_entry rdev_super
= __ATTR_RO(super
);
1890 errors_show(mdk_rdev_t
*rdev
, char *page
)
1892 return sprintf(page
, "%d\n", atomic_read(&rdev
->corrected_errors
));
1896 errors_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
1899 unsigned long n
= simple_strtoul(buf
, &e
, 10);
1900 if (*buf
&& (*e
== 0 || *e
== '\n')) {
1901 atomic_set(&rdev
->corrected_errors
, n
);
1906 static struct rdev_sysfs_entry rdev_errors
=
1907 __ATTR(errors
, S_IRUGO
|S_IWUSR
, errors_show
, errors_store
);
1910 slot_show(mdk_rdev_t
*rdev
, char *page
)
1912 if (rdev
->raid_disk
< 0)
1913 return sprintf(page
, "none\n");
1915 return sprintf(page
, "%d\n", rdev
->raid_disk
);
1919 slot_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
1924 int slot
= simple_strtoul(buf
, &e
, 10);
1925 if (strncmp(buf
, "none", 4)==0)
1927 else if (e
==buf
|| (*e
&& *e
!= '\n'))
1929 if (rdev
->mddev
->pers
) {
1930 /* Setting 'slot' on an active array requires also
1931 * updating the 'rd%d' link, and communicating
1932 * with the personality with ->hot_*_disk.
1933 * For now we only support removing
1934 * failed/spare devices. This normally happens automatically,
1935 * but not when the metadata is externally managed.
1939 if (rdev
->raid_disk
== -1)
1941 /* personality does all needed checks */
1942 if (rdev
->mddev
->pers
->hot_add_disk
== NULL
)
1944 err
= rdev
->mddev
->pers
->
1945 hot_remove_disk(rdev
->mddev
, rdev
->raid_disk
);
1948 sprintf(nm
, "rd%d", rdev
->raid_disk
);
1949 sysfs_remove_link(&rdev
->mddev
->kobj
, nm
);
1950 set_bit(MD_RECOVERY_NEEDED
, &rdev
->mddev
->recovery
);
1951 md_wakeup_thread(rdev
->mddev
->thread
);
1953 if (slot
>= rdev
->mddev
->raid_disks
)
1955 rdev
->raid_disk
= slot
;
1956 /* assume it is working */
1957 clear_bit(Faulty
, &rdev
->flags
);
1958 clear_bit(WriteMostly
, &rdev
->flags
);
1959 set_bit(In_sync
, &rdev
->flags
);
1965 static struct rdev_sysfs_entry rdev_slot
=
1966 __ATTR(slot
, S_IRUGO
|S_IWUSR
, slot_show
, slot_store
);
1969 offset_show(mdk_rdev_t
*rdev
, char *page
)
1971 return sprintf(page
, "%llu\n", (unsigned long long)rdev
->data_offset
);
1975 offset_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
1978 unsigned long long offset
= simple_strtoull(buf
, &e
, 10);
1979 if (e
==buf
|| (*e
&& *e
!= '\n'))
1981 if (rdev
->mddev
->pers
)
1983 if (rdev
->size
&& rdev
->mddev
->external
)
1984 /* Must set offset before size, so overlap checks
1987 rdev
->data_offset
= offset
;
1991 static struct rdev_sysfs_entry rdev_offset
=
1992 __ATTR(offset
, S_IRUGO
|S_IWUSR
, offset_show
, offset_store
);
1995 rdev_size_show(mdk_rdev_t
*rdev
, char *page
)
1997 return sprintf(page
, "%llu\n", (unsigned long long)rdev
->size
);
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
2011 rdev_size_store(mdk_rdev_t
*rdev
, const char *buf
, size_t len
)
2014 unsigned long long size
= simple_strtoull(buf
, &e
, 10);
2015 unsigned long long oldsize
= rdev
->size
;
	mddev_t *my_mddev = rdev->mddev;
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (my_mddev->pers)
		return -EBUSY;
	rdev->size = size;
2030 if (size
> oldsize
&& rdev
->mddev
->external
) {
2031 /* need to check that all other rdevs with the same ->bdev
2032 * do not overlap. We need to unlock the mddev to avoid
2033 * a deadlock. We have already changed rdev->size, and if
2034 * we have to change it back, we will have the lock again.
2038 struct list_head
*tmp
, *tmp2
;
		mddev_unlock(my_mddev);
2045 for_each_mddev(mddev
, tmp
) {
2049 rdev_for_each(rdev2
, tmp2
, mddev
)
2050 if (test_bit(AllReserved
, &rdev2
->flags
) ||
2051 (rdev
->bdev
== rdev2
->bdev
&&
2053 overlaps(rdev
->data_offset
, rdev
->size
,
2054 rdev2
->data_offset
, rdev2
->size
))) {
2058 mddev_unlock(mddev
);
		mddev_lock(my_mddev);
2070 /* Someone else could have slipped in a size
2071 * change here, but doing so is just silly.
2072 * We put oldsize back because we *know* it is
2073 * safe, and trust userspace not to race with
2076 rdev
->size
= oldsize
;
	if (size < my_mddev->size || my_mddev->size == 0)
		my_mddev->size = size;
	return len;
}
2090 static struct rdev_sysfs_entry rdev_size
=
2091 __ATTR(size
, S_IRUGO
|S_IWUSR
, rdev_size_show
, rdev_size_store
);
2093 static struct attribute
*rdev_default_attrs
[] = {
2103 rdev_attr_show(struct kobject
*kobj
, struct attribute
*attr
, char *page
)
2105 struct rdev_sysfs_entry
*entry
= container_of(attr
, struct rdev_sysfs_entry
, attr
);
2106 mdk_rdev_t
*rdev
= container_of(kobj
, mdk_rdev_t
, kobj
);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}
2132 rdev_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
2133 const char *page
, size_t length
)
2135 struct rdev_sysfs_entry
*entry
= container_of(attr
, struct rdev_sysfs_entry
, attr
);
2136 mdk_rdev_t
*rdev
= container_of(kobj
, mdk_rdev_t
, kobj
);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
2146 if (!capable(CAP_SYS_ADMIN
))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}
2167 static void rdev_free(struct kobject
*ko
)
2169 mdk_rdev_t
*rdev
= container_of(ko
, mdk_rdev_t
, kobj
);
2172 static struct sysfs_ops rdev_sysfs_ops
= {
2173 .show
= rdev_attr_show
,
2174 .store
= rdev_attr_store
,
2176 static struct kobj_type rdev_ktype
= {
2177 .release
= rdev_free
,
2178 .sysfs_ops
= &rdev_sysfs_ops
,
2179 .default_attrs
= rdev_default_attrs
,
2183 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2185 * mark the device faulty if:
2187 * - the device is nonexistent (zero size)
2188 * - the device has no valid superblock
2190 * a faulty rdev _never_ has rdev->sb set.
2192 static mdk_rdev_t
*md_import_device(dev_t newdev
, int super_format
, int super_minor
)
2194 char b
[BDEVNAME_SIZE
];
2199 rdev
= kzalloc(sizeof(*rdev
), GFP_KERNEL
);
2201 printk(KERN_ERR
"md: could not alloc mem for new device!\n");
2202 return ERR_PTR(-ENOMEM
);
2205 if ((err
= alloc_disk_sb(rdev
)))
2208 err
= lock_rdev(rdev
, newdev
, super_format
== -2);
2212 kobject_init(&rdev
->kobj
, &rdev_ktype
);
2215 rdev
->saved_raid_disk
= -1;
2216 rdev
->raid_disk
= -1;
2218 rdev
->data_offset
= 0;
2219 rdev
->sb_events
= 0;
2220 atomic_set(&rdev
->nr_pending
, 0);
2221 atomic_set(&rdev
->read_errors
, 0);
2222 atomic_set(&rdev
->corrected_errors
, 0);
2224 size
= rdev
->bdev
->bd_inode
->i_size
>> BLOCK_SIZE_BITS
;
2227 "md: %s has zero or unknown size, marking faulty!\n",
2228 bdevname(rdev
->bdev
,b
));
2233 if (super_format
>= 0) {
2234 err
= super_types
[super_format
].
2235 load_super(rdev
, NULL
, super_minor
);
2236 if (err
== -EINVAL
) {
2238 "md: %s does not have a valid v%d.%d "
2239 "superblock, not importing!\n",
2240 bdevname(rdev
->bdev
,b
),
2241 super_format
, super_minor
);
2246 "md: could not read %s's sb, not importing!\n",
2247 bdevname(rdev
->bdev
,b
));
2251 INIT_LIST_HEAD(&rdev
->same_set
);
2256 if (rdev
->sb_page
) {
2262 return ERR_PTR(err
);
2266 * Check a full RAID array for plausibility
2270 static void analyze_sbs(mddev_t
* mddev
)
2273 struct list_head
*tmp
;
2274 mdk_rdev_t
*rdev
, *freshest
;
2275 char b
[BDEVNAME_SIZE
];
2278 rdev_for_each(rdev
, tmp
, mddev
)
2279 switch (super_types
[mddev
->major_version
].
2280 load_super(rdev
, freshest
, mddev
->minor_version
)) {
2288 "md: fatal superblock inconsistency in %s"
2289 " -- removing from array\n",
2290 bdevname(rdev
->bdev
,b
));
2291 kick_rdev_from_array(rdev
);
2295 super_types
[mddev
->major_version
].
2296 validate_super(mddev
, freshest
);
2299 rdev_for_each(rdev
, tmp
, mddev
) {
2300 if (rdev
!= freshest
)
2301 if (super_types
[mddev
->major_version
].
2302 validate_super(mddev
, rdev
)) {
2303 printk(KERN_WARNING
"md: kicking non-fresh %s"
2305 bdevname(rdev
->bdev
,b
));
2306 kick_rdev_from_array(rdev
);
2309 if (mddev
->level
== LEVEL_MULTIPATH
) {
2310 rdev
->desc_nr
= i
++;
2311 rdev
->raid_disk
= rdev
->desc_nr
;
2312 set_bit(In_sync
, &rdev
->flags
);
2313 } else if (rdev
->raid_disk
>= mddev
->raid_disks
) {
2314 rdev
->raid_disk
= -1;
2315 clear_bit(In_sync
, &rdev
->flags
);
2321 if (mddev
->recovery_cp
!= MaxSector
&&
2323 printk(KERN_ERR
"md: %s: raid array is not clean"
2324 " -- starting background reconstruction\n",
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];
	char *e;
	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, len);
	buf[len] = 0;
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	msec = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
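/*
 * Worked example (illustrative): writing "0.200" to safe_mode_delay is
 * parsed as 200 with scale=1000, i.e. 200 msec, which then becomes
 * (200*HZ)/1000 jiffies, rounded up to at least one jiffy.
 */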
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	ssize_t rv = len;
	if (mddev->pers)
		return -EBUSY;
	if (len == 0)
		return 0;
	if (len >= sizeof(mddev->clevel))
		return -ENOSPC;
	strncpy(mddev->clevel, buf, len);
	if (mddev->clevel[len-1] == '\n')
		len--;
	mddev->clevel[len] = 0;
	mddev->level = LEVEL_NONE;
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	if (mddev->reshape_position != MaxSector)
		mddev->new_layout = n;
	else
		mddev->layout = n;
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_size != mddev->new_chunk)
		return sprintf(page, "%d (%d)\n", mddev->new_chunk,
			       mddev->chunk_size);
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set chunk_size if array is not yet active */
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	else if (mddev->reshape_position != MaxSector)
		mddev->new_chunk = n;
	else
		mddev->chunk_size = n;
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set the resync start point if array is not yet active */
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
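/*
 * Illustrative usage (not driver logic): these states are what user
 * space reads and writes through sysfs, e.g.
 *
 *	cat /sys/block/md0/md/array_state
 *	echo clean > /sys/block/md0/md/array_state
 *
 * The strings in array_states[] must stay in the same order as the
 * enum above, since array_state_show() indexes one with the other.
 */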
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->size == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(mddev_t * mddev, int ro);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->active) > 1)
			return -EBUSY;
		err = do_md_stop(mddev, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->active) > 1)
				return -EBUSY;
			err = do_md_stop(mddev, 2);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1);
		else {
			mddev->ro = 1;
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		/* stopping an active array */
		if (mddev->pers) {
			err = do_md_stop(mddev, 1);
			if (err == 0)
				mddev->ro = 2; /* FIXME mark devices writable */
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else {
			mddev->ro = 0;
			mddev->recovery_cp = MaxSector;
			err = do_md_run(mddev);
		}
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else
		return len;
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;


	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
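/*
 * Illustrative usage (not driver logic): new_dev takes "major:minor",
 * e.g. "echo 8:16 > new_dev" asks the array to import the device with
 * dev_t MKDEV(8, 16) (typically /dev/sdb).
 */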
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long chunk, end_chunk;
	char *end;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
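/*
 * Illustrative usage (not driver logic): bitmap_set_bits accepts a
 * whitespace-separated list of chunk numbers and ranges, e.g.
 * "echo '0-15 64 100-200' > bitmap_set_bits" marks those bitmap chunks
 * dirty so the corresponding regions are considered out of sync.
 */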
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
}

static int update_size(mddev_t *mddev, unsigned long size);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	char *e;
	int err = 0;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	if (!*buf || *buf == '\n' ||
	    (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		err = update_size(mddev, size);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->size == 0 ||
		    mddev->size > size)
			mddev->size = size;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
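/*
 * Illustrative usage (not driver logic): valid writes to
 * metadata_version include "none", "external:<type>" (e.g.
 * "external:imsm"), or an internal format such as "0.90" or "1.2",
 * where the major number must index a populated entry in super_types[].
 */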
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return len;
}

static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);


static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;
	resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = resync - (mddev->resync_mark_cnt);
	return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
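/*
 * Note (illustrative): the mark counters above are kept in 512-byte
 * sectors, so db/dt gives sectors per second and the extra /2 converts
 * that into the KiB/sec figure reported through sysfs.
 */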
static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_blocks, resync;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors;
	else
		max_blocks = mddev->size << 1;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
	return sprintf(page, "%lu / %lu\n", resync, max_blocks);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		char *ep;
		unsigned long long max = simple_strtoull(buf, &ep, 10);
		if (ep == buf || (*ep != 0 && *ep != '\n'))
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_size) {
			if (max & (sector_t)((mddev->chunk_size>>9)-1))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
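/*
 * Worked example (illustrative): with a 64KiB chunk_size (128 sectors)
 * the mask (chunk_size>>9)-1 is 127, so sync_max must be written as a
 * multiple of 128 sectors or the store above returns -EINVAL.
 */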
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);


static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk = mddev->chunk_size;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_completed.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);
	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned = (MAJOR(dev) != MD_MAJOR);
	int shift = partitioned ? MdpMinorShift : 0;
	int unit = MINOR(dev) >> shift;
	int error;

	if (!mddev)
		return NULL;

	mutex_lock(&disks_mutex);
	if (mddev->gendisk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk = alloc_disk(1 << shift);
	if (!disk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk->major = MAJOR(dev);
	disk->first_minor = unit << shift;
	if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	add_disk(disk);
	mddev->gendisk = disk;
	mutex_unlock(&disks_mutex);
	error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
				     "%s", "md");
	if (error)
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
	else
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
	return NULL;
}
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	mddev->safemode = 1;
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;
3345 static int do_md_run(mddev_t
* mddev
)
3349 struct list_head
*tmp
;
3351 struct gendisk
*disk
;
3352 struct mdk_personality
*pers
;
3353 char b
[BDEVNAME_SIZE
];
3355 if (list_empty(&mddev
->disks
))
3356 /* cannot run an array with no devices.. */
3363 * Analyze all RAID superblock(s)
3365 if (!mddev
->raid_disks
) {
3366 if (!mddev
->persistent
)
3371 chunk_size
= mddev
->chunk_size
;
3374 if (chunk_size
> MAX_CHUNK_SIZE
) {
3375 printk(KERN_ERR
"too big chunk_size: %d > %d\n",
3376 chunk_size
, MAX_CHUNK_SIZE
);
3380 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
3382 if ( (1 << ffz(~chunk_size
)) != chunk_size
) {
3383 printk(KERN_ERR
"chunk_size of %d not valid\n", chunk_size
);
3386 if (chunk_size
< PAGE_SIZE
) {
3387 printk(KERN_ERR
"too small chunk_size: %d < %ld\n",
3388 chunk_size
, PAGE_SIZE
);
3392 /* devices must have minimum size of one chunk */
3393 rdev_for_each(rdev
, tmp
, mddev
) {
3394 if (test_bit(Faulty
, &rdev
->flags
))
3396 if (rdev
->size
< chunk_size
/ 1024) {
3398 "md: Dev %s smaller than chunk_size:"
3400 bdevname(rdev
->bdev
,b
),
3401 (unsigned long long)rdev
->size
,
3409 if (mddev
->level
!= LEVEL_NONE
)
3410 request_module("md-level-%d", mddev
->level
);
3411 else if (mddev
->clevel
[0])
3412 request_module("md-%s", mddev
->clevel
);
3416 * Drop all container device buffers, from now on
3417 * the only valid external interface is through the md
3420 rdev_for_each(rdev
, tmp
, mddev
) {
3421 if (test_bit(Faulty
, &rdev
->flags
))
3423 sync_blockdev(rdev
->bdev
);
3424 invalidate_bdev(rdev
->bdev
);
3426 /* perform some consistency tests on the device.
3427 * We don't want the data to overlap the metadata,
3428 * Internal Bitmap issues has handled elsewhere.
3430 if (rdev
->data_offset
< rdev
->sb_offset
) {
3432 rdev
->data_offset
+ mddev
->size
*2
3433 > rdev
->sb_offset
*2) {
3434 printk("md: %s: data overlaps metadata\n",
3439 if (rdev
->sb_offset
*2 + rdev
->sb_size
/512
3440 > rdev
->data_offset
) {
3441 printk("md: %s: metadata overlaps data\n",
3448 md_probe(mddev
->unit
, NULL
, NULL
);
3449 disk
= mddev
->gendisk
;
3453 spin_lock(&pers_lock
);
3454 pers
= find_pers(mddev
->level
, mddev
->clevel
);
3455 if (!pers
|| !try_module_get(pers
->owner
)) {
3456 spin_unlock(&pers_lock
);
3457 if (mddev
->level
!= LEVEL_NONE
)
3458 printk(KERN_WARNING
"md: personality for level %d is not loaded!\n",
3461 printk(KERN_WARNING
"md: personality for level %s is not loaded!\n",
3466 spin_unlock(&pers_lock
);
3467 mddev
->level
= pers
->level
;
3468 strlcpy(mddev
->clevel
, pers
->name
, sizeof(mddev
->clevel
));
3470 if (mddev
->reshape_position
!= MaxSector
&&
3471 pers
->start_reshape
== NULL
) {
3472 /* This personality cannot handle reshaping... */
3474 module_put(pers
->owner
);
3478 if (pers
->sync_request
) {
3479 /* Warn if this is a potentially silly
3482 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
3484 struct list_head
*tmp2
;
3486 rdev_for_each(rdev
, tmp
, mddev
) {
3487 rdev_for_each(rdev2
, tmp2
, mddev
) {
3489 rdev
->bdev
->bd_contains
==
3490 rdev2
->bdev
->bd_contains
) {
3492 "%s: WARNING: %s appears to be"
3493 " on the same physical disk as"
3496 bdevname(rdev
->bdev
,b
),
3497 bdevname(rdev2
->bdev
,b2
));
3504 "True protection against single-disk"
3505 " failure might be compromised.\n");
3508 mddev
->recovery
= 0;
3509 mddev
->resync_max_sectors
= mddev
->size
<< 1; /* may be over-ridden by personality */
3510 mddev
->barriers_work
= 1;
3511 mddev
->ok_start_degraded
= start_dirty_degraded
;
3514 mddev
->ro
= 2; /* read-only, but switch on first write */
3516 err
= mddev
->pers
->run(mddev
);
3517 if (!err
&& mddev
->pers
->sync_request
) {
3518 err
= bitmap_create(mddev
);
3520 printk(KERN_ERR
"%s: failed to create bitmap (%d)\n",
3521 mdname(mddev
), err
);
3522 mddev
->pers
->stop(mddev
);
3526 printk(KERN_ERR
"md: pers->run() failed ...\n");
3527 module_put(mddev
->pers
->owner
);
3529 bitmap_destroy(mddev
);
3532 if (mddev
->pers
->sync_request
) {
3533 if (sysfs_create_group(&mddev
->kobj
, &md_redundancy_group
))
3535 "md: cannot register extra attributes for %s\n",
3537 } else if (mddev
->ro
== 2) /* auto-readonly not meaningful */
3540 atomic_set(&mddev
->writes_pending
,0);
3541 mddev
->safemode
= 0;
3542 mddev
->safemode_timer
.function
= md_safemode_timeout
;
3543 mddev
->safemode_timer
.data
= (unsigned long) mddev
;
3544 mddev
->safemode_delay
= (200 * HZ
)/1000 +1; /* 200 msec delay */
3547 rdev_for_each(rdev
, tmp
, mddev
)
3548 if (rdev
->raid_disk
>= 0) {
3550 sprintf(nm
, "rd%d", rdev
->raid_disk
);
3551 if (sysfs_create_link(&mddev
->kobj
, &rdev
->kobj
, nm
))
3552 printk("md: cannot register %s for %s\n",
3556 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
3559 md_update_sb(mddev
, 0);
3561 set_capacity(disk
, mddev
->array_size
<<1);
3563 /* If we call blk_queue_make_request here, it will
3564 * re-initialise max_sectors etc which may have been
3565 * refined inside -> run. So just set the bits we need to set.
3566 * Most initialisation happended when we called
3567 * blk_queue_make_request(..., md_fail_request)
3570 mddev
->queue
->queuedata
= mddev
;
3571 mddev
->queue
->make_request_fn
= mddev
->pers
->make_request
;
3573 /* If there is a partially-recovered drive we need to
3574 * start recovery here. If we leave it to md_check_recovery,
3575 * it will remove the drives and not do the right thing
3577 if (mddev
->degraded
&& !mddev
->sync_thread
) {
3578 struct list_head
*rtmp
;
3580 rdev_for_each(rdev
, rtmp
, mddev
)
3581 if (rdev
->raid_disk
>= 0 &&
3582 !test_bit(In_sync
, &rdev
->flags
) &&
3583 !test_bit(Faulty
, &rdev
->flags
))
3584 /* complete an interrupted recovery */
3586 if (spares
&& mddev
->pers
->sync_request
) {
3587 mddev
->recovery
= 0;
3588 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
3589 mddev
->sync_thread
= md_register_thread(md_do_sync
,
3592 if (!mddev
->sync_thread
) {
3593 printk(KERN_ERR
"%s: could not start resync"
3596 /* leave the spares where they are, it shouldn't hurt */
3597 mddev
->recovery
= 0;
3601 md_wakeup_thread(mddev
->thread
);
3602 md_wakeup_thread(mddev
->sync_thread
); /* possibly kick off a reshape */
3605 md_new_event(mddev
);
3606 kobject_uevent(&mddev
->gendisk
->dev
.kobj
, KOBJ_CHANGE
);
3610 static int restart_array(mddev_t
*mddev
)
3612 struct gendisk
*disk
= mddev
->gendisk
;
3616 * Complain if it has no devices
3619 if (list_empty(&mddev
->disks
))
3627 mddev
->safemode
= 0;
3629 set_disk_ro(disk
, 0);
3631 printk(KERN_INFO
"md: %s switched to read-write mode.\n",
3634 * Kick recovery or resync if necessary
3636 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
3637 md_wakeup_thread(mddev
->thread
);
3638 md_wakeup_thread(mddev
->sync_thread
);
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
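/*
 * Illustrative pairing (no new logic): set_bitmap_file() below calls
 * deny_bitmap_write_access() after fget()ing the bitmap file, and both
 * set_bitmap_file() and do_md_stop() call restore_bitmap_write_access()
 * before fput() so the file becomes writable again.
 */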
3674 * 0 - completely stop and dis-assemble array
3675 * 1 - switch to readonly
3676 * 2 - stop but do not disassemble array
3678 static int do_md_stop(mddev_t
* mddev
, int mode
)
3681 struct gendisk
*disk
= mddev
->gendisk
;
3684 if (atomic_read(&mddev
->active
)>2) {
3685 printk("md: %s still in use.\n",mdname(mddev
));
3689 if (mddev
->sync_thread
) {
3690 set_bit(MD_RECOVERY_FROZEN
, &mddev
->recovery
);
3691 set_bit(MD_RECOVERY_INTR
, &mddev
->recovery
);
3692 md_unregister_thread(mddev
->sync_thread
);
3693 mddev
->sync_thread
= NULL
;
3696 del_timer_sync(&mddev
->safemode_timer
);
3698 invalidate_partition(disk
, 0);
3701 case 1: /* readonly */
3707 case 0: /* disassemble */
3709 bitmap_flush(mddev
);
3710 md_super_wait(mddev
);
3712 set_disk_ro(disk
, 0);
3713 blk_queue_make_request(mddev
->queue
, md_fail_request
);
3714 mddev
->pers
->stop(mddev
);
3715 mddev
->queue
->merge_bvec_fn
= NULL
;
3716 mddev
->queue
->unplug_fn
= NULL
;
3717 mddev
->queue
->backing_dev_info
.congested_fn
= NULL
;
3718 if (mddev
->pers
->sync_request
)
3719 sysfs_remove_group(&mddev
->kobj
, &md_redundancy_group
);
3721 module_put(mddev
->pers
->owner
);
3724 set_capacity(disk
, 0);
3730 if (!mddev
->in_sync
|| mddev
->flags
) {
3731 /* mark array as shutdown cleanly */
3733 md_update_sb(mddev
, 1);
3736 set_disk_ro(disk
, 1);
3737 clear_bit(MD_RECOVERY_FROZEN
, &mddev
->recovery
);
3741 * Free resources if final stop
3745 struct list_head
*tmp
;
3747 printk(KERN_INFO
"md: %s stopped.\n", mdname(mddev
));
3749 bitmap_destroy(mddev
);
3750 if (mddev
->bitmap_file
) {
3751 restore_bitmap_write_access(mddev
->bitmap_file
);
3752 fput(mddev
->bitmap_file
);
3753 mddev
->bitmap_file
= NULL
;
3755 mddev
->bitmap_offset
= 0;
3757 rdev_for_each(rdev
, tmp
, mddev
)
3758 if (rdev
->raid_disk
>= 0) {
3760 sprintf(nm
, "rd%d", rdev
->raid_disk
);
3761 sysfs_remove_link(&mddev
->kobj
, nm
);
3764 /* make sure all md_delayed_delete calls have finished */
3765 flush_scheduled_work();
3767 export_array(mddev
);
3769 mddev
->array_size
= 0;
3771 mddev
->raid_disks
= 0;
3772 mddev
->recovery_cp
= 0;
3773 mddev
->resync_max
= MaxSector
;
3774 mddev
->reshape_position
= MaxSector
;
3775 mddev
->external
= 0;
3776 mddev
->persistent
= 0;
3778 } else if (mddev
->pers
)
3779 printk(KERN_INFO
"md: %s switched to read-only mode.\n",
3782 md_new_event(mddev
);
3788 static void autorun_array(mddev_t
*mddev
)
3791 struct list_head
*tmp
;
3794 if (list_empty(&mddev
->disks
))
3797 printk(KERN_INFO
"md: running: ");
3799 rdev_for_each(rdev
, tmp
, mddev
) {
3800 char b
[BDEVNAME_SIZE
];
3801 printk("<%s>", bdevname(rdev
->bdev
,b
));
3805 err
= do_md_run (mddev
);
3807 printk(KERN_WARNING
"md: do_md_run() returned %d\n", err
);
3808 do_md_stop (mddev
, 0);
3813 * lets try to run arrays based on all disks that have arrived
3814 * until now. (those are in pending_raid_disks)
3816 * the method: pick the first pending disk, collect all disks with
3817 * the same UUID, remove all from the pending list and put them into
3818 * the 'same_array' list. Then order this list based on superblock
3819 * update time (freshest comes first), kick out 'old' disks and
3820 * compare superblocks. If everything's fine then run it.
3822 * If "unit" is allocated, then bump its reference count
3824 static void autorun_devices(int part
)
3826 struct list_head
*tmp
;
3827 mdk_rdev_t
*rdev0
, *rdev
;
3829 char b
[BDEVNAME_SIZE
];
3831 printk(KERN_INFO
"md: autorun ...\n");
3832 while (!list_empty(&pending_raid_disks
)) {
3835 LIST_HEAD(candidates
);
3836 rdev0
= list_entry(pending_raid_disks
.next
,
3837 mdk_rdev_t
, same_set
);
3839 printk(KERN_INFO
"md: considering %s ...\n",
3840 bdevname(rdev0
->bdev
,b
));
3841 INIT_LIST_HEAD(&candidates
);
3842 rdev_for_each_list(rdev
, tmp
, pending_raid_disks
)
3843 if (super_90_load(rdev
, rdev0
, 0) >= 0) {
3844 printk(KERN_INFO
"md: adding %s ...\n",
3845 bdevname(rdev
->bdev
,b
));
3846 list_move(&rdev
->same_set
, &candidates
);
3849 * now we have a set of devices, with all of them having
3850 * mostly sane superblocks. It's time to allocate the
3854 dev
= MKDEV(mdp_major
,
3855 rdev0
->preferred_minor
<< MdpMinorShift
);
3856 unit
= MINOR(dev
) >> MdpMinorShift
;
3858 dev
= MKDEV(MD_MAJOR
, rdev0
->preferred_minor
);
3861 if (rdev0
->preferred_minor
!= unit
) {
3862 printk(KERN_INFO
"md: unit number in %s is bad: %d\n",
3863 bdevname(rdev0
->bdev
, b
), rdev0
->preferred_minor
);
3867 md_probe(dev
, NULL
, NULL
);
3868 mddev
= mddev_find(dev
);
3871 "md: cannot allocate memory for md drive.\n");
3874 if (mddev_lock(mddev
))
3875 printk(KERN_WARNING
"md: %s locked, cannot run\n",
3877 else if (mddev
->raid_disks
|| mddev
->major_version
3878 || !list_empty(&mddev
->disks
)) {
3880 "md: %s already running, cannot run %s\n",
3881 mdname(mddev
), bdevname(rdev0
->bdev
,b
));
3882 mddev_unlock(mddev
);
3884 printk(KERN_INFO
"md: created %s\n", mdname(mddev
));
3885 mddev
->persistent
= 1;
3886 rdev_for_each_list(rdev
, tmp
, candidates
) {
3887 list_del_init(&rdev
->same_set
);
3888 if (bind_rdev_to_array(rdev
, mddev
))
3891 autorun_array(mddev
);
3892 mddev_unlock(mddev
);
3894 /* on success, candidates will be empty, on error
3897 rdev_for_each_list(rdev
, tmp
, candidates
)
3901 printk(KERN_INFO
"md: ... autorun DONE.\n");
3903 #endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
3919 static int get_array_info(mddev_t
* mddev
, void __user
* arg
)
3921 mdu_array_info_t info
;
3922 int nr
,working
,active
,failed
,spare
;
3924 struct list_head
*tmp
;
3926 nr
=working
=active
=failed
=spare
=0;
3927 rdev_for_each(rdev
, tmp
, mddev
) {
3929 if (test_bit(Faulty
, &rdev
->flags
))
3933 if (test_bit(In_sync
, &rdev
->flags
))
3940 info
.major_version
= mddev
->major_version
;
3941 info
.minor_version
= mddev
->minor_version
;
3942 info
.patch_version
= MD_PATCHLEVEL_VERSION
;
3943 info
.ctime
= mddev
->ctime
;
3944 info
.level
= mddev
->level
;
3945 info
.size
= mddev
->size
;
3946 if (info
.size
!= mddev
->size
) /* overflow */
3949 info
.raid_disks
= mddev
->raid_disks
;
3950 info
.md_minor
= mddev
->md_minor
;
3951 info
.not_persistent
= !mddev
->persistent
;
3953 info
.utime
= mddev
->utime
;
3956 info
.state
= (1<<MD_SB_CLEAN
);
3957 if (mddev
->bitmap
&& mddev
->bitmap_offset
)
3958 info
.state
= (1<<MD_SB_BITMAP_PRESENT
);
3959 info
.active_disks
= active
;
3960 info
.working_disks
= working
;
3961 info
.failed_disks
= failed
;
3962 info
.spare_disks
= spare
;
3964 info
.layout
= mddev
->layout
;
3965 info
.chunk_size
= mddev
->chunk_size
;
3967 if (copy_to_user(arg
, &info
, sizeof(info
)))
3973 static int get_bitmap_file(mddev_t
* mddev
, void __user
* arg
)
3975 mdu_bitmap_file_t
*file
= NULL
; /* too big for stack allocation */
3976 char *ptr
, *buf
= NULL
;
3979 md_allow_write(mddev
);
3981 file
= kmalloc(sizeof(*file
), GFP_KERNEL
);
3985 /* bitmap disabled, zero the first byte and copy out */
3986 if (!mddev
->bitmap
|| !mddev
->bitmap
->file
) {
3987 file
->pathname
[0] = '\0';
3991 buf
= kmalloc(sizeof(file
->pathname
), GFP_KERNEL
);
3995 ptr
= file_path(mddev
->bitmap
->file
, buf
, sizeof(file
->pathname
));
3999 strcpy(file
->pathname
, ptr
);
4003 if (copy_to_user(arg
, file
, sizeof(*file
)))
4011 static int get_disk_info(mddev_t
* mddev
, void __user
* arg
)
4013 mdu_disk_info_t info
;
4017 if (copy_from_user(&info
, arg
, sizeof(info
)))
4022 rdev
= find_rdev_nr(mddev
, nr
);
4024 info
.major
= MAJOR(rdev
->bdev
->bd_dev
);
4025 info
.minor
= MINOR(rdev
->bdev
->bd_dev
);
4026 info
.raid_disk
= rdev
->raid_disk
;
4028 if (test_bit(Faulty
, &rdev
->flags
))
4029 info
.state
|= (1<<MD_DISK_FAULTY
);
4030 else if (test_bit(In_sync
, &rdev
->flags
)) {
4031 info
.state
|= (1<<MD_DISK_ACTIVE
);
4032 info
.state
|= (1<<MD_DISK_SYNC
);
4034 if (test_bit(WriteMostly
, &rdev
->flags
))
4035 info
.state
|= (1<<MD_DISK_WRITEMOSTLY
);
4037 info
.major
= info
.minor
= 0;
4038 info
.raid_disk
= -1;
4039 info
.state
= (1<<MD_DISK_REMOVED
);
4042 if (copy_to_user(arg
, &info
, sizeof(info
)))
4048 static int add_new_disk(mddev_t
* mddev
, mdu_disk_info_t
*info
)
4050 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
4052 dev_t dev
= MKDEV(info
->major
,info
->minor
);
4054 if (info
->major
!= MAJOR(dev
) || info
->minor
!= MINOR(dev
))
4057 if (!mddev
->raid_disks
) {
4059 /* expecting a device which has a superblock */
4060 rdev
= md_import_device(dev
, mddev
->major_version
, mddev
->minor_version
);
4063 "md: md_import_device returned %ld\n",
4065 return PTR_ERR(rdev
);
4067 if (!list_empty(&mddev
->disks
)) {
4068 mdk_rdev_t
*rdev0
= list_entry(mddev
->disks
.next
,
4069 mdk_rdev_t
, same_set
);
4070 int err
= super_types
[mddev
->major_version
]
4071 .load_super(rdev
, rdev0
, mddev
->minor_version
);
4074 "md: %s has different UUID to %s\n",
4075 bdevname(rdev
->bdev
,b
),
4076 bdevname(rdev0
->bdev
,b2
));
4081 err
= bind_rdev_to_array(rdev
, mddev
);
4088 * add_new_disk can be used once the array is assembled
4089 * to add "hot spares". They must already have a superblock
4094 if (!mddev
->pers
->hot_add_disk
) {
4096 "%s: personality does not support diskops!\n",
4100 if (mddev
->persistent
)
4101 rdev
= md_import_device(dev
, mddev
->major_version
,
4102 mddev
->minor_version
);
4104 rdev
= md_import_device(dev
, -1, -1);
4107 "md: md_import_device returned %ld\n",
4109 return PTR_ERR(rdev
);
4111 /* set save_raid_disk if appropriate */
4112 if (!mddev
->persistent
) {
4113 if (info
->state
& (1<<MD_DISK_SYNC
) &&
4114 info
->raid_disk
< mddev
->raid_disks
)
4115 rdev
->raid_disk
= info
->raid_disk
;
4117 rdev
->raid_disk
= -1;
4119 super_types
[mddev
->major_version
].
4120 validate_super(mddev
, rdev
);
4121 rdev
->saved_raid_disk
= rdev
->raid_disk
;
4123 clear_bit(In_sync
, &rdev
->flags
); /* just to be sure */
4124 if (info
->state
& (1<<MD_DISK_WRITEMOSTLY
))
4125 set_bit(WriteMostly
, &rdev
->flags
);
4127 rdev
->raid_disk
= -1;
4128 err
= bind_rdev_to_array(rdev
, mddev
);
4129 if (!err
&& !mddev
->pers
->hot_remove_disk
) {
4130 /* If there is hot_add_disk but no hot_remove_disk
4131 * then added disks for geometry changes,
4132 * and should be added immediately.
4134 super_types
[mddev
->major_version
].
4135 validate_super(mddev
, rdev
);
4136 err
= mddev
->pers
->hot_add_disk(mddev
, rdev
);
4138 unbind_rdev_from_array(rdev
);
4143 md_update_sb(mddev
, 1);
4144 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
4145 md_wakeup_thread(mddev
->thread
);
4149 /* otherwise, add_new_disk is only allowed
4150 * for major_version==0 superblocks
4152 if (mddev
->major_version
!= 0) {
4153 printk(KERN_WARNING
"%s: ADD_NEW_DISK not supported\n",
4158 if (!(info
->state
& (1<<MD_DISK_FAULTY
))) {
4160 rdev
= md_import_device (dev
, -1, 0);
4163 "md: error, md_import_device() returned %ld\n",
4165 return PTR_ERR(rdev
);
4167 rdev
->desc_nr
= info
->number
;
4168 if (info
->raid_disk
< mddev
->raid_disks
)
4169 rdev
->raid_disk
= info
->raid_disk
;
4171 rdev
->raid_disk
= -1;
4173 if (rdev
->raid_disk
< mddev
->raid_disks
)
4174 if (info
->state
& (1<<MD_DISK_SYNC
))
4175 set_bit(In_sync
, &rdev
->flags
);
4177 if (info
->state
& (1<<MD_DISK_WRITEMOSTLY
))
4178 set_bit(WriteMostly
, &rdev
->flags
);
4180 if (!mddev
->persistent
) {
4181 printk(KERN_INFO
"md: nonpersistent superblock ...\n");
4182 rdev
->sb_offset
= rdev
->bdev
->bd_inode
->i_size
>> BLOCK_SIZE_BITS
;
4184 rdev
->sb_offset
= calc_dev_sboffset(rdev
->bdev
);
4185 rdev
->size
= calc_dev_size(rdev
, mddev
->chunk_size
);
4187 err
= bind_rdev_to_array(rdev
, mddev
);
4197 static int hot_remove_disk(mddev_t
* mddev
, dev_t dev
)
4199 char b
[BDEVNAME_SIZE
];
4205 rdev
= find_rdev(mddev
, dev
);
4209 if (rdev
->raid_disk
>= 0)
4212 kick_rdev_from_array(rdev
);
4213 md_update_sb(mddev
, 1);
4214 md_new_event(mddev
);
4218 printk(KERN_WARNING
"md: cannot remove active disk %s from %s ... \n",
4219 bdevname(rdev
->bdev
,b
), mdname(mddev
));
4223 static int hot_add_disk(mddev_t
* mddev
, dev_t dev
)
4225 char b
[BDEVNAME_SIZE
];
4233 if (mddev
->major_version
!= 0) {
4234 printk(KERN_WARNING
"%s: HOT_ADD may only be used with"
4235 " version-0 superblocks.\n",
4239 if (!mddev
->pers
->hot_add_disk
) {
4241 "%s: personality does not support diskops!\n",
4246 rdev
= md_import_device (dev
, -1, 0);
4249 "md: error, md_import_device() returned %ld\n",
4254 if (mddev
->persistent
)
4255 rdev
->sb_offset
= calc_dev_sboffset(rdev
->bdev
);
4258 rdev
->bdev
->bd_inode
->i_size
>> BLOCK_SIZE_BITS
;
4260 size
= calc_dev_size(rdev
, mddev
->chunk_size
);
4263 if (test_bit(Faulty
, &rdev
->flags
)) {
4265 "md: can not hot-add faulty %s disk to %s!\n",
4266 bdevname(rdev
->bdev
,b
), mdname(mddev
));
4270 clear_bit(In_sync
, &rdev
->flags
);
4272 rdev
->saved_raid_disk
= -1;
4273 err
= bind_rdev_to_array(rdev
, mddev
);
4278 * The rest should better be atomic, we can have disk failures
4279 * noticed in interrupt contexts ...
4282 if (rdev
->desc_nr
== mddev
->max_disks
) {
4283 printk(KERN_WARNING
"%s: can not hot-add to full array!\n",
4286 goto abort_unbind_export
;
4289 rdev
->raid_disk
= -1;
4291 md_update_sb(mddev
, 1);
4294 * Kick recovery, maybe this spare has to be added to the
4295 * array immediately.
4297 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
4298 md_wakeup_thread(mddev
->thread
);
4299 md_new_event(mddev
);
4302 abort_unbind_export
:
4303 unbind_rdev_from_array(rdev
);
4310 static int set_bitmap_file(mddev_t
*mddev
, int fd
)
4315 if (!mddev
->pers
->quiesce
)
4317 if (mddev
->recovery
|| mddev
->sync_thread
)
4319 /* we should be able to change the bitmap.. */
4325 return -EEXIST
; /* cannot add when bitmap is present */
4326 mddev
->bitmap_file
= fget(fd
);
4328 if (mddev
->bitmap_file
== NULL
) {
4329 printk(KERN_ERR
"%s: error: failed to get bitmap file\n",
4334 err
= deny_bitmap_write_access(mddev
->bitmap_file
);
4336 printk(KERN_ERR
"%s: error: bitmap file is already in use\n",
4338 fput(mddev
->bitmap_file
);
4339 mddev
->bitmap_file
= NULL
;
4342 mddev
->bitmap_offset
= 0; /* file overrides offset */
4343 } else if (mddev
->bitmap
== NULL
)
4344 return -ENOENT
; /* cannot remove what isn't there */
4347 mddev
->pers
->quiesce(mddev
, 1);
4349 err
= bitmap_create(mddev
);
4350 if (fd
< 0 || err
) {
4351 bitmap_destroy(mddev
);
4352 fd
= -1; /* make sure to put the file */
4354 mddev
->pers
->quiesce(mddev
, 0);
4357 if (mddev
->bitmap_file
) {
4358 restore_bitmap_write_access(mddev
->bitmap_file
);
4359 fput(mddev
->bitmap_file
);
4361 mddev
->bitmap_file
= NULL
;
4368 * set_array_info is used two different ways
4369 * The original usage is when creating a new array.
4370 * In this usage, raid_disks is > 0 and it together with
4371 * level, size, not_persistent,layout,chunksize determine the
4372 * shape of the array.
4373 * This will always create an array with a type-0.90.0 superblock.
4374 * The newer usage is when assembling an array.
4375 * In this case raid_disks will be 0, and the major_version field is
4376 * use to determine which style super-blocks are to be found on the devices.
4377 * The minor and patch _version numbers are also kept incase the
4378 * super_block handler wishes to interpret them.
4380 static int set_array_info(mddev_t
* mddev
, mdu_array_info_t
*info
)
4383 if (info
->raid_disks
== 0) {
4384 /* just setting version number for superblock loading */
4385 if (info
->major_version
< 0 ||
4386 info
->major_version
>= ARRAY_SIZE(super_types
) ||
4387 super_types
[info
->major_version
].name
== NULL
) {
4388 /* maybe try to auto-load a module? */
4390 "md: superblock version %d not known\n",
4391 info
->major_version
);
4394 mddev
->major_version
= info
->major_version
;
4395 mddev
->minor_version
= info
->minor_version
;
4396 mddev
->patch_version
= info
->patch_version
;
4397 mddev
->persistent
= !info
->not_persistent
;
4400 mddev
->major_version
= MD_MAJOR_VERSION
;
4401 mddev
->minor_version
= MD_MINOR_VERSION
;
4402 mddev
->patch_version
= MD_PATCHLEVEL_VERSION
;
4403 mddev
->ctime
= get_seconds();
4405 mddev
->level
= info
->level
;
4406 mddev
->clevel
[0] = 0;
4407 mddev
->size
= info
->size
;
4408 mddev
->raid_disks
= info
->raid_disks
;
4409 /* don't set md_minor, it is determined by which /dev/md* was
4412 if (info
->state
& (1<<MD_SB_CLEAN
))
4413 mddev
->recovery_cp
= MaxSector
;
4415 mddev
->recovery_cp
= 0;
4416 mddev
->persistent
= ! info
->not_persistent
;
4417 mddev
->external
= 0;
4419 mddev
->layout
= info
->layout
;
4420 mddev
->chunk_size
= info
->chunk_size
;
4422 mddev
->max_disks
= MD_SB_DISKS
;
4424 if (mddev
->persistent
)
4426 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
4428 mddev
->default_bitmap_offset
= MD_SB_BYTES
>> 9;
4429 mddev
->bitmap_offset
= 0;
4431 mddev
->reshape_position
= MaxSector
;
4434 * Generate a 128 bit UUID
4436 get_random_bytes(mddev
->uuid
, 16);
4438 mddev
->new_level
= mddev
->level
;
4439 mddev
->new_chunk
= mddev
->chunk_size
;
4440 mddev
->new_layout
= mddev
->layout
;
4441 mddev
->delta_disks
= 0;
4446 static int update_size(mddev_t
*mddev
, unsigned long size
)
4450 struct list_head
*tmp
;
4451 int fit
= (size
== 0);
4453 if (mddev
->pers
->resize
== NULL
)
4455 /* The "size" is the amount of each device that is used.
4456 * This can only make sense for arrays with redundancy.
4457 * linear and raid0 always use whatever space is available
4458 * We can only consider changing the size if no resync
4459 * or reconstruction is happening, and if the new size
4460 * is acceptable. It must fit before the sb_offset or,
4461 * if that is <data_offset, it must fit before the
4462 * size of each device.
4463 * If size is zero, we find the largest size that fits.
4465 if (mddev
->sync_thread
)
4467 rdev_for_each(rdev
, tmp
, mddev
) {
4469 avail
= rdev
->size
* 2;
4471 if (fit
&& (size
== 0 || size
> avail
/2))
4473 if (avail
< ((sector_t
)size
<< 1))
4476 rv
= mddev
->pers
->resize(mddev
, (sector_t
)size
*2);
4478 struct block_device
*bdev
;
4480 bdev
= bdget_disk(mddev
->gendisk
, 0);
4482 mutex_lock(&bdev
->bd_inode
->i_mutex
);
4483 i_size_write(bdev
->bd_inode
, (loff_t
)mddev
->array_size
<< 10);
4484 mutex_unlock(&bdev
->bd_inode
->i_mutex
);
4491 static int update_raid_disks(mddev_t
*mddev
, int raid_disks
)
4494 /* change the number of raid disks */
4495 if (mddev
->pers
->check_reshape
== NULL
)
4497 if (raid_disks
<= 0 ||
4498 raid_disks
>= mddev
->max_disks
)
4500 if (mddev
->sync_thread
|| mddev
->reshape_position
!= MaxSector
)
4502 mddev
->delta_disks
= raid_disks
- mddev
->raid_disks
;
4504 rv
= mddev
->pers
->check_reshape(mddev
);
4510 * update_array_info is used to change the configuration of an
4512 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
4513 * fields in the info are checked against the array.
4514 * Any differences that cannot be handled will cause an error.
4515 * Normally, only one change can be managed at a time.
4517 static int update_array_info(mddev_t
*mddev
, mdu_array_info_t
*info
)
4523 /* calculate expected state,ignoring low bits */
4524 if (mddev
->bitmap
&& mddev
->bitmap_offset
)
4525 state
|= (1 << MD_SB_BITMAP_PRESENT
);
4527 if (mddev
->major_version
!= info
->major_version
||
4528 mddev
->minor_version
!= info
->minor_version
||
4529 /* mddev->patch_version != info->patch_version || */
4530 mddev
->ctime
!= info
->ctime
||
4531 mddev
->level
!= info
->level
||
4532 /* mddev->layout != info->layout || */
4533 !mddev
->persistent
!= info
->not_persistent
||
4534 mddev
->chunk_size
!= info
->chunk_size
||
4535 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4536 ((state
^info
->state
) & 0xfffffe00)
4539 /* Check there is only one change */
4540 if (info
->size
>= 0 && mddev
->size
!= info
->size
) cnt
++;
4541 if (mddev
->raid_disks
!= info
->raid_disks
) cnt
++;
4542 if (mddev
->layout
!= info
->layout
) cnt
++;
4543 if ((state
^ info
->state
) & (1<<MD_SB_BITMAP_PRESENT
)) cnt
++;
4544 if (cnt
== 0) return 0;
4545 if (cnt
> 1) return -EINVAL
;
4547 if (mddev
->layout
!= info
->layout
) {
4549 * we don't need to do anything at the md level, the
4550 * personality will take care of it all.
4552 if (mddev
->pers
->reconfig
== NULL
)
4555 return mddev
->pers
->reconfig(mddev
, info
->layout
, -1);
4557 if (info
->size
>= 0 && mddev
->size
!= info
->size
)
4558 rv
= update_size(mddev
, info
->size
);
4560 if (mddev
->raid_disks
!= info
->raid_disks
)
4561 rv
= update_raid_disks(mddev
, info
->raid_disks
);
4563 if ((state
^ info
->state
) & (1<<MD_SB_BITMAP_PRESENT
)) {
4564 if (mddev
->pers
->quiesce
== NULL
)
4566 if (mddev
->recovery
|| mddev
->sync_thread
)
4568 if (info
->state
& (1<<MD_SB_BITMAP_PRESENT
)) {
4569 /* add the bitmap */
4572 if (mddev
->default_bitmap_offset
== 0)
4574 mddev
->bitmap_offset
= mddev
->default_bitmap_offset
;
4575 mddev
->pers
->quiesce(mddev
, 1);
4576 rv
= bitmap_create(mddev
);
4578 bitmap_destroy(mddev
);
4579 mddev
->pers
->quiesce(mddev
, 0);
4581 /* remove the bitmap */
4584 if (mddev
->bitmap
->file
)
4586 mddev
->pers
->quiesce(mddev
, 1);
4587 bitmap_destroy(mddev
);
4588 mddev
->pers
->quiesce(mddev
, 0);
4589 mddev
->bitmap_offset
= 0;
4592 md_update_sb(mddev
, 1);
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}

static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
4621 static int md_ioctl(struct inode
*inode
, struct file
*file
,
4622 unsigned int cmd
, unsigned long arg
)
4625 void __user
*argp
= (void __user
*)arg
;
4626 mddev_t
*mddev
= NULL
;
4628 if (!capable(CAP_SYS_ADMIN
))
4632 * Commands dealing with the RAID driver but not any
4638 err
= get_version(argp
);
4641 case PRINT_RAID_DEBUG
:
4649 autostart_arrays(arg
);
4656 * Commands creating/starting a new array:
4659 mddev
= inode
->i_bdev
->bd_disk
->private_data
;
4666 err
= mddev_lock(mddev
);
4669 "md: ioctl lock interrupted, reason %d, cmd %d\n",
4676 case SET_ARRAY_INFO
:
4678 mdu_array_info_t info
;
4680 memset(&info
, 0, sizeof(info
));
4681 else if (copy_from_user(&info
, argp
, sizeof(info
))) {
4686 err
= update_array_info(mddev
, &info
);
4688 printk(KERN_WARNING
"md: couldn't update"
4689 " array info. %d\n", err
);
4694 if (!list_empty(&mddev
->disks
)) {
4696 "md: array %s already has disks!\n",
4701 if (mddev
->raid_disks
) {
4703 "md: array %s already initialised!\n",
4708 err
= set_array_info(mddev
, &info
);
4710 printk(KERN_WARNING
"md: couldn't set"
4711 " array info. %d\n", err
);
4721 * Commands querying/configuring an existing array:
4723 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4724 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4725 if ((!mddev
->raid_disks
&& !mddev
->external
)
4726 && cmd
!= ADD_NEW_DISK
&& cmd
!= STOP_ARRAY
4727 && cmd
!= RUN_ARRAY
&& cmd
!= SET_BITMAP_FILE
4728 && cmd
!= GET_BITMAP_FILE
) {
4734 * Commands even a read-only array can execute:
4738 case GET_ARRAY_INFO
:
4739 err
= get_array_info(mddev
, argp
);
4742 case GET_BITMAP_FILE
:
4743 err
= get_bitmap_file(mddev
, argp
);
4747 err
= get_disk_info(mddev
, argp
);
4750 case RESTART_ARRAY_RW
:
4751 err
= restart_array(mddev
);
4755 err
= do_md_stop (mddev
, 0);
4759 err
= do_md_stop (mddev
, 1);
4763 * We have a problem here : there is no easy way to give a CHS
4764 * virtual geometry. We currently pretend that we have a 2 heads
4765 * 4 sectors (with a BIG number of cylinders...). This drives
4766 * dosfs just mad... ;-)
4771 * The remaining ioctls are changing the state of the
4772 * superblock, so we do not allow them on read-only arrays.
4773 * However non-MD ioctls (e.g. get-size) will still come through
4774 * here and hit the 'default' below, so only disallow
4775 * 'md' ioctls, and switch to rw mode if started auto-readonly.
4777 if (_IOC_TYPE(cmd
) == MD_MAJOR
&&
4778 mddev
->ro
&& mddev
->pers
) {
4779 if (mddev
->ro
== 2) {
4781 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
4782 md_wakeup_thread(mddev
->thread
);
4794 mdu_disk_info_t info
;
4795 if (copy_from_user(&info
, argp
, sizeof(info
)))
4798 err
= add_new_disk(mddev
, &info
);
4802 case HOT_REMOVE_DISK
:
4803 err
= hot_remove_disk(mddev
, new_decode_dev(arg
));
4807 err
= hot_add_disk(mddev
, new_decode_dev(arg
));
4810 case SET_DISK_FAULTY
:
4811 err
= set_disk_faulty(mddev
, new_decode_dev(arg
));
4815 err
= do_md_run (mddev
);
4818 case SET_BITMAP_FILE
:
4819 err
= set_bitmap_file(mddev
, (int)arg
);
4829 mddev_unlock(mddev
);
4839 static int md_open(struct inode
*inode
, struct file
*file
)
4842 * Succeed if we can lock the mddev, which confirms that
4843 * it isn't being stopped right now.
4845 mddev_t
*mddev
= inode
->i_bdev
->bd_disk
->private_data
;
4848 if ((err
= mutex_lock_interruptible_nested(&mddev
->reconfig_mutex
, 1)))
4853 mddev_unlock(mddev
);
4855 check_disk_change(inode
->i_bdev
);
4860 static int md_release(struct inode
*inode
, struct file
* file
)
4862 mddev_t
*mddev
= inode
->i_bdev
->bd_disk
->private_data
;
4870 static int md_media_changed(struct gendisk
*disk
)
4872 mddev_t
*mddev
= disk
->private_data
;
4874 return mddev
->changed
;
4877 static int md_revalidate(struct gendisk
*disk
)
4879 mddev_t
*mddev
= disk
->private_data
;
4884 static struct block_device_operations md_fops
=
4886 .owner
= THIS_MODULE
,
4888 .release
= md_release
,
4890 .getgeo
= md_getgeo
,
4891 .media_changed
= md_media_changed
,
4892 .revalidate_disk
= md_revalidate
,
4895 static int md_thread(void * arg
)
4897 mdk_thread_t
*thread
= arg
;
4900 * md_thread is a 'system-thread', it's priority should be very
4901 * high. We avoid resource deadlocks individually in each
4902 * raid personality. (RAID5 does preallocation) We also use RR and
4903 * the very same RT priority as kswapd, thus we will never get
4904 * into a priority inversion deadlock.
4906 * we definitely have to have equal or higher priority than
4907 * bdflush, otherwise bdflush will deadlock if there are too
4908 * many dirty RAID5 blocks.
4911 allow_signal(SIGKILL
);
4912 while (!kthread_should_stop()) {
4914 /* We need to wait INTERRUPTIBLE so that
4915 * we don't add to the load-average.
4916 * That means we need to be sure no signals are
4919 if (signal_pending(current
))
4920 flush_signals(current
);
4922 wait_event_interruptible_timeout
4924 test_bit(THREAD_WAKEUP
, &thread
->flags
)
4925 || kthread_should_stop(),
4928 clear_bit(THREAD_WAKEUP
, &thread
->flags
);
4930 thread
->run(thread
->mddev
);
void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
4945 mdk_thread_t
*md_register_thread(void (*run
) (mddev_t
*), mddev_t
*mddev
,
4948 mdk_thread_t
*thread
;
4950 thread
= kzalloc(sizeof(mdk_thread_t
), GFP_KERNEL
);
4954 init_waitqueue_head(&thread
->wqueue
);
4957 thread
->mddev
= mddev
;
4958 thread
->timeout
= MAX_SCHEDULE_TIMEOUT
;
4959 thread
->tsk
= kthread_run(md_thread
, thread
, name
, mdname(thread
->mddev
));
4960 if (IS_ERR(thread
->tsk
)) {
4967 void md_unregister_thread(mdk_thread_t
*thread
)
4969 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread
->tsk
));
4971 kthread_stop(thread
->tsk
);
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
	if (!rdev || test_bit(Faulty, &rdev->flags))

	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));

	if (!mddev->pers->error_handler)

	mddev->pers->error_handler(mddev,rdev);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event_inintr(mddev);
/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
	struct list_head *tmp;

	seq_printf(seq, "unused devices: ");

	rdev_for_each_list(rdev, tmp, pending_raid_disks) {
		char b[BDEVNAME_SIZE];

		seq_printf(seq, "%s ",
			   bdevname(rdev->bdev,b));

		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
static void status_resync(struct seq_file *seq, mddev_t * mddev)
	sector_t max_blocks, resync, res;
	unsigned long dt, db, rt;

	unsigned int per_milli;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors >> 1;

		max_blocks = mddev->size;

	/*
	 * Should not happen.
	 */

	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_blocks>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_blocks/2 > (1ULL<<(scale+32)))

	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_blocks>>scale)+1));

	int i, x = per_milli/50, y = 20-x;
	seq_printf(seq, "[");
	for (i = 0; i < x; i++)
		seq_printf(seq, "=");
	seq_printf(seq, ">");
	for (i = 0; i < y; i++)
		seq_printf(seq, ".");
	seq_printf(seq, "] ");

	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync,
		   (unsigned long long) max_blocks);

	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important. We do a +1 to be
	 * safe against division by zero. We only estimate anyway.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);

	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;
	rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;

	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
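
/*
 * Worked example for the estimate above (illustrative numbers only):
 * with dt = 100 seconds since the mark, db = 204800 sectors written in
 * that window (102400 1K blocks, ~1024 blocks/sec) and
 * max_blocks - resync = 1024000 blocks still to do:
 *
 *	db/2/100 + 1               = 1025
 *	(max_blocks - resync)/1025 = 999
 *	rt = (100 * 999)/100       = 999 seconds, shown as finish=16.6min
 *	speed = db/2/dt            = 1024 K/sec
 *
 * i.e. close to the exact 1000 seconds, while every intermediate value
 * stays well inside an unsigned long.
 */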
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
	struct list_head *tmp;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)

		mddev = list_entry(tmp, mddev_t, all_mddevs);

		spin_unlock(&all_mddevs_lock);

	spin_unlock(&all_mddevs_lock);

	return (void*)2;/* tail */
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	spin_lock(&all_mddevs_lock);

		tmp = all_mddevs.next;

		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));

		next_mddev = (void*)2;

	spin_unlock(&all_mddevs_lock);
static void md_seq_stop(struct seq_file *seq, void *v)
	if (mddev && v != (void*)1 && v != (void*)2)
struct mdstat_info {
static int md_seq_show(struct seq_file *seq, void *v)
	struct list_head *tmp2;

	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);

	if (v == (void*)2) {

	if (mddev_lock(mddev) < 0)

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");

			seq_printf(seq, " (read-only)");
			seq_printf(seq, " (auto-read-only)");
		seq_printf(seq, " %s", mddev->pers->name);

		rdev_for_each(rdev, tmp2, mddev) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");

			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */

		if (!list_empty(&mddev->disks)) {

				seq_printf(seq, "\n %llu blocks",
					   (unsigned long long)mddev->array_size);

				seq_printf(seq, "\n %llu blocks",
					   (unsigned long long)size);

		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);

		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);

			seq_printf(seq, " super non-persistent");

			mddev->pers->status (seq, mddev);
			seq_printf(seq, "\n ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync (seq, mddev);
					seq_printf(seq, "\n ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n ");

			seq_printf(seq, "\n ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				   bitmap->pages - bitmap->missing_pages,
				   (bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				   chunk_kb ? chunk_kb : bitmap->chunksize,
				   chunk_kb ? "KB" : "B");

				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);

		seq_printf(seq, "\n");

	mddev_unlock(mddev);
static struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
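
/*
 * For illustration only: the handlers above produce /proc/mdstat output
 * roughly along these lines (device names and numbers invented; the
 * "[2/2] [UU]" detail comes from the personality's ->status method):
 *
 *	Personalities : [raid1] [raid6]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      1048512 blocks [2/2] [UU]
 *	      [==>..................]  resync = 12.3% (129024/1048512) finish=16.6min speed=1024K/sec
 *
 *	unused devices: <none>
 */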
static int md_seq_open(struct inode *inode, struct file *file)
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);

	error = seq_open(file, &md_seq_ops);

		struct seq_file *p = file->private_data;

		mi->event = atomic_read(&md_event_count);
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= md_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
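
/*
 * Illustrative only: a monitoring program (mdadm --monitor works along
 * these lines) can sleep on /proc/mdstat and be woken on array events.
 * The sketch below is userspace pseudocode, not driver code:
 *
 *	struct pollfd pfd = { .fd = open("/proc/mdstat", O_RDONLY),
 *			      .events = POLLPRI };
 *	read(pfd.fd, buf, sizeof(buf));    read current contents first
 *	poll(&pfd, 1, -1);                 returns once an md event occurs
 *
 * mdstat_poll() above reports POLLERR|POLLPRI as soon as md_event_count
 * differs from the value sampled when the status header was last shown.
 */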
int register_md_personality(struct mdk_personality *p)
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
int unregister_md_personality(struct mdk_personality *p)
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
static int is_mddev_idle(mddev_t *mddev)
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, sectors[0]) +
			      disk_stat_read(disk, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there are few or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (curr_events - rdev->last_events > 4096) {
			rdev->last_events = curr_events;
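
/*
 * Note on the threshold above: 4096 sectors is 2MB of non-resync I/O
 * against a member device between two checks; only beyond that does the
 * array stop counting as "idle" and resync throttle back towards
 * speed_limit_min.
 */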
void md_done_sync(mddev_t *mddev, int blocks, int ok)
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);

		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
	if (bio_data_dir(bi) != WRITE)

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */

		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
	atomic_inc(&mddev->writes_pending);
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {

			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);

		spin_unlock_irq(&mddev->write_lock);

	wait_event(mddev->sb_wait, mddev->flags==0);
void md_write_end(mddev_t *mddev)
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
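
/*
 * Illustrative sketch only (function name hypothetical, flow simplified):
 * a personality brackets array writes with the two helpers above.  Real
 * personalities call md_write_end() from the bio completion path rather
 * than inline as shown here.
 */
#if 0
static void example_handle_write(mddev_t *mddev, struct bio *bio)
{
	md_write_start(mddev, bio);	/* may block while the superblock is
					 * marked 'active'                   */
	/* ... issue the write to the member devices and wait for it ... */
	md_write_end(mddev);		/* last writer arms the safemode timer */
}
#endif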
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(mddev_t *mddev)
	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {

		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);

		spin_unlock_irq(&mddev->write_lock);

EXPORT_SYMBOL_GPL(md_allow_write);
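
/*
 * Illustrative only: the intended calling pattern for md_allow_write(),
 * e.g. before a GFP_KERNEL allocation made while the mddev lock is held
 * (names hypothetical):
 *
 *	if (mddev_lock(mddev) == 0) {
 *		md_allow_write(mddev);
 *		new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 *		...
 *		mddev_unlock(mddev);
 *	}
 */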
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
void md_do_sync(mddev_t *mddev)
	unsigned int currspeed = 0,

	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];

	struct list_head *tmp;
	sector_t last_check;

	struct list_head *rtmp;
	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))

	if (mddev->ro) /* never try to sync a read-only array */

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */

		mddev->curr_resync = 2;

		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);

		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)

			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {

				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);

				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */

				prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));

					finish_wait(&resync_wait, &wq);

				finish_wait(&resync_wait, &wq);

	} while (mddev->curr_resync < 2);
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (!mddev->bitmap &&
		    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->recovery_cp;
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->size << 1;

		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;

		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	for (m = 0; m < SYNC_MARKS; m++) {

		mark_cnt[m] = io_sectors;

	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);

		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;

	while (j < max_sectors) {
		if (j >= mddev->resync_max) {
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			wait_event(mddev->recovery_wait,
				   mddev->resync_max > j
				   || kthread_should_stop());

		if (kthread_should_stop())

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));

			set_bit(MD_RECOVERY_ERR, &mddev->recovery);

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);

		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))

		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {

			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);

		if (kthread_should_stop())

		/*
		 * this loop exits only when we are either slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev)) {

	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {

					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;

				mddev->recovery_cp = MaxSector;

			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rdev_for_each(rdev, rtmp, mddev)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;

	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->curr_resync = 0;
	mddev->resync_max = MaxSector;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	/*
	 * got a signal, exit.
	 */
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);

EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
	struct list_head *rtmp;

	rdev_for_each(rdev, rtmp, mddev)
		if (rdev->raid_disk >= 0 &&

		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {

				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;

	if (mddev->degraded) {
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->hot_add_disk(mddev,rdev)) {

					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,

						       "md: cannot register "

					md_new_event(mddev);
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
	struct list_head *rtmp;

		bitmap_daemon_work(mddev->bitmap);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",

			mddev->safemode = 2;

		flush_signals(current);

		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)

	if (mddev_trylock(mddev)) {

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {

			if (mddev->persistent)
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);

		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

				/* activate any spares */
				mddev->pers->spare_active(mddev);

			md_update_sb(mddev, 1);

			/* if array is no longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				rdev_for_each(rdev, rtmp, mddev)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);

		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */

			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);

			mddev->sync_thread = md_register_thread(md_do_sync,

			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"

				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;

				md_wakeup_thread(mddev->sync_thread);

			md_new_event(mddev);

		mddev_unlock(mddev);
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
	struct list_head *tmp;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				do_md_stop (mddev, 1);
				mddev_unlock(mddev);

		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.priority	= INT_MAX, /* before any real devices */
static void md_geninit(void)
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);

		p->proc_fops = &md_seq_fops;
static int __init md_init(void)
	if (register_blkdev(MAJOR_NR, "md"))

	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");

	blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);
/*
 * Searches all registered partitions for autorun RAID arrays
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;

void md_autodetect_dev(dev_t dev)
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);

		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
static void autostart_arrays(int part)
	struct detected_devices_node *node_detected_dev;

	int i_scanned, i_passed;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {

		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);

		if (test_bit(Faulty, &rdev->flags)) {
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
	       i_scanned, i_passed);

	autorun_devices(part);

#endif /* !MODULE */
6119 #endif /* !MODULE */
6121 static __exit
void md_exit(void)
6124 struct list_head
*tmp
;
6126 blk_unregister_region(MKDEV(MAJOR_NR
,0), 1U << MINORBITS
);
6127 blk_unregister_region(MKDEV(mdp_major
,0), 1U << MINORBITS
);
6129 unregister_blkdev(MAJOR_NR
,"md");
6130 unregister_blkdev(mdp_major
, "mdp");
6131 unregister_reboot_notifier(&md_notifier
);
6132 unregister_sysctl_table(raid_table_header
);
6133 remove_proc_entry("mdstat", NULL
);
6134 for_each_mddev(mddev
, tmp
) {
6135 struct gendisk
*disk
= mddev
->gendisk
;
6138 export_array(mddev
);
6141 mddev
->gendisk
= NULL
;
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
	return sprintf(buffer, "%d", start_readonly);

static int set_ro(const char *val, struct kernel_param *kp)
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
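
/*
 * For illustration: assuming the usual module name md_mod, the start_ro
 * knob defined above is normally set from userspace with
 *
 *	modprobe md_mod start_ro=1
 * or, at runtime,
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 */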
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);