/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"
static void raid0_unplug(struct request_queue *q)
{
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i;

        for (i = 0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

                blk_unplug(r_queue);
        }
}
static int raid0_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i, ret = 0;

        for (i = 0; i < mddev->raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}
static int create_strip_zones (mddev_t *mddev)
{
        int i, c, j;
        sector_t current_start, curr_zone_start;
        sector_t min_spacing;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];

        /*
         * The number of 'same size groups'
         */
        conf->nr_strip_zones = 0;

        list_for_each_entry(rdev1, &mddev->disks, same_set) {
                printk(KERN_INFO "raid0: looking at %s\n",
                        bdevname(rdev1->bdev,b));
                c = 0;
                list_for_each_entry(rdev2, &mddev->disks, same_set) {
                        printk(KERN_INFO "raid0: comparing %s(%llu)",
                               bdevname(rdev1->bdev,b),
                               (unsigned long long)rdev1->sectors);
                        printk(KERN_INFO " with %s(%llu)\n",
                               bdevname(rdev2->bdev,b),
                               (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                printk(KERN_INFO "raid0: END\n");
                                break;
                        }
                        if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                printk(KERN_INFO "raid0: EQUAL\n");
                                c = 1;
                                break;
                        }
                        printk(KERN_INFO "raid0: NOT EQUAL\n");
                }
                if (!c) {
                        printk(KERN_INFO "raid0: ==> UNIQUE\n");
                        conf->nr_strip_zones++;
                        printk(KERN_INFO "raid0: %d zones\n",
                                conf->nr_strip_zones);
                }
        }
        printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);

        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                return 1;
        conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                return 1;
        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        zone->dev = conf->devlist;
        list_for_each_entry(rdev1, &mddev->disks, same_set) {
                int j = rdev1->raid_disk;

                if (j < 0 || j >= mddev->raid_disks) {
                        printk(KERN_ERR "raid0: bad disk number %d - "
                                "aborting!\n", j);
                        goto abort;
                }
                if (zone->dev[j]) {
                        printk(KERN_ERR "raid0: multiple devices for %d - "
                                "aborting!\n", j);
                        goto abort;
                }
                zone->dev[j] = rdev1;

                blk_queue_stack_limits(mddev->queue,
                                       rdev1->bdev->bd_disk->queue);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sector to one PAGE, as
                 * a one page request is never in violation.
                 */
                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                printk(KERN_ERR "raid0: too few disks (%d of %d) - "
                        "aborting!\n", cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->sectors = smallest->sectors * cnt;
        zone->zone_start = 0;

        current_start = smallest->sectors;
        curr_zone_start = zone->sectors;
        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++)
        {
                zone = conf->strip_zone + i;
                zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

                printk(KERN_INFO "raid0: zone %d\n", i);
                zone->dev_start = current_start;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        char b[BDEVNAME_SIZE];
                        rdev = conf->strip_zone[0].dev[j];
                        printk(KERN_INFO "raid0: checking %s ...",
                                bdevname(rdev->bdev, b));
                        if (rdev->sectors <= current_start) {
                                printk(KERN_INFO " nope.\n");
                                continue;
                        }
                        printk(KERN_INFO " contained as device %d\n", c);
                        zone->dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                printk(KERN_INFO " (%llu) is smallest!.\n",
                                        (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                zone->sectors = (smallest->sectors - current_start) * c;
                printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
                        zone->nb_dev, (unsigned long long)zone->sectors);

                zone->zone_start = curr_zone_start;
                curr_zone_start += zone->sectors;

                current_start = smallest->sectors;
                printk(KERN_INFO "raid0: current zone start: %llu\n",
                        (unsigned long long)current_start);
        }
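        /*
         * Worked example of the zone layout built above (sizes are
         * hypothetical, purely for illustration): three devices of 100,
         * 200 and 300 sectors produce three zones.  Zone 0 stripes over
         * all three devices for the first 100 sectors of each
         * (300 sectors, zone_start 0), zone 1 stripes over the two larger
         * devices (200 sectors, zone_start 300, dev_start 100), and
         * zone 2 is the tail of the largest device alone (100 sectors,
         * zone_start 500, dev_start 200).
         */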
        /* Now find appropriate hash spacing.
         * We want a number which causes most hash entries to cover
         * at most two strips, but the hash table must be at most
         * 1 PAGE.  We choose the smallest strip, or contiguous collection
         * of strips, that has big enough size.  We never consider the last
         * strip though as its size has no bearing on the efficacy of the hash
         * table.
         */
        conf->spacing = curr_zone_start;
        min_spacing = curr_zone_start;
        sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
        for (i = 0; i < conf->nr_strip_zones-1; i++) {
                sector_t s = 0;
                for (j = i; j < conf->nr_strip_zones - 1 &&
                                s < min_spacing; j++)
                        s += conf->strip_zone[j].sectors;
                if (s >= min_spacing && s < conf->spacing)
                        conf->spacing = s;
        }
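        /*
         * Illustrative example (numbers hypothetical): with 4 KiB pages
         * and 8-byte pointers the table holds at most 512 entries, so
         * min_spacing is roughly the array size divided by 512.  If the
         * zones were 300/200/100 sectors and min_spacing came out at 150,
         * the loop would consider runs starting at zone 0 (300 sectors)
         * and zone 1 (200 sectors) and pick 200, the smallest run of
         * whole zones that still covers min_spacing; the last zone never
         * starts a run.
         */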
        mddev->queue->unplug_fn = raid0_unplug;

        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        printk(KERN_INFO "raid0: done.\n");
        return 0;

abort:
        return 1;
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_size >> 9;
        unsigned int bio_sectors = bvm->bi_size >> 9;

        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
        if (max < 0) max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        else
                return max;
}
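/*
 * Illustrative arithmetic for the helper above (values hypothetical):
 * with 64 KiB chunks, chunk_sectors is 128.  A bio positioned at
 * sector offset 120 within its chunk and carrying no data yet
 * (bio_sectors == 0) gets max = (128 - 120) << 9 = 4096 bytes, i.e.
 * one more page may be added before the request would straddle a
 * chunk boundary.
 */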
static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        mdk_rdev_t *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        list_for_each_entry(rdev, &mddev->disks, same_set)
                array_sectors += rdev->sectors;

        return array_sectors;
}
static int raid0_run (mddev_t *mddev)
{
        unsigned cur = 0, i = 0, nb_zone;
        s64 sectors;
        raid0_conf_t *conf;

        if (mddev->chunk_size == 0) {
                printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
                return -EINVAL;
        }
        printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
               mdname(mddev),
               mddev->chunk_size >> 9,
               (mddev->chunk_size>>1)-1);
        blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
        blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
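        /*
         * Example of the arithmetic above (chunk size hypothetical):
         * a 64 KiB chunk gives max_sectors = 65536 >> 9 = 128 sectors
         * and a segment boundary mask of (65536 >> 1) - 1 = 0x7fff,
         * i.e. no segment may cross a 32 KiB boundary.
         */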
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;

        conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
        if (!conf)
                goto out;
        mddev->private = (void *)conf;

        conf->strip_zone = NULL;
        conf->devlist = NULL;
        if (create_strip_zones (mddev))
                goto out_free_conf;
        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
                (unsigned long long)mddev->array_sectors);
        printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
                (unsigned long long)conf->spacing);
        {
                sector_t s = raid0_size(mddev, 0, 0);
                sector_t space = conf->spacing;
                int round;
                conf->sector_shift = 0;
                if (sizeof(sector_t) > sizeof(u32)) {
                        /* shift down space and s so that sector_div will work */
                        while (space > (sector_t) (~(u32)0)) {
                                s >>= 1;
                                space >>= 1;
                                s += 1; /* force round-up */
                                conf->sector_shift++;
                        }
                }
                round = sector_div(s, (u32)space) ? 1 : 0;
                nb_zone = s + round;
        }
        printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
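        /*
         * Illustrative example (sizes hypothetical): a 1000-sector array
         * with spacing 200 divides evenly, giving nb_zone = 5 hash
         * entries; 1100 sectors would leave a remainder and round up to
         * nb_zone = 6.
         */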
        printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
                                nb_zone*sizeof(struct strip_zone*));
        conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
        if (!conf->hash_table)
                goto out_free_conf;
        sectors = conf->strip_zone[cur].sectors;

        conf->hash_table[0] = conf->strip_zone + cur;
        for (i = 1; i < nb_zone; i++) {
                while (sectors <= conf->spacing) {
                        cur++;
                        sectors += conf->strip_zone[cur].sectors;
                }
                sectors -= conf->spacing;
                conf->hash_table[i] = conf->strip_zone + cur;
        }
        if (conf->sector_shift) {
                conf->spacing >>= conf->sector_shift;
                /* round spacing up so when we divide by it, we
                 * err on the side of too-low, which is safest
                 */
                conf->spacing++;
        }
        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe, i.e. number of devices
         * multiplied by chunk size times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
        {
                int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
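        /*
         * Example with hypothetical numbers: 4 disks, 64 KiB chunks and
         * 4 KiB pages give stripe = 4 * 65536 / 4096 = 64 pages, so
         * read-ahead is raised to at least 128 pages (512 KiB), i.e.
         * two full stripes.
         */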
        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        return 0;

out_free_conf:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
out:
        return -ENOMEM;
}
static int raid0_stop (mddev_t *mddev)
{
        raid0_conf_t *conf = mddev_to_conf(mddev);

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf->hash_table);
        conf->hash_table = NULL;
        kfree(conf->strip_zone);
        conf->strip_zone = NULL;
        kfree(conf);
        mddev->private = NULL;

        return 0;
}
static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
        mddev_t *mddev = q->queuedata;
        unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
        sector_t chunk;
        sector_t sector, rsect;
        const int rw = bio_data_dir(bio);
        int cpu;

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
                      bio_sectors(bio));
        part_stat_unlock();

        chunk_sects = mddev->chunk_size >> 9;
        chunksect_bits = ffz(~chunk_sects);
        sector = bio->bi_sector;

        if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);

                bio_pair_release(bp);
                return 0;
        }
        {
                sector_t x = sector >> conf->sector_shift;
                sector_div(x, (u32)conf->spacing);
                zone = conf->hash_table[x];
        }

        while (sector >= zone->zone_start + zone->sectors)
                zone++;

        sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

        {
                sector_t x = (sector - zone->zone_start) >> chunksect_bits;

                sector_div(x, zone->nb_dev);
                chunk = x;

                x = sector >> chunksect_bits;
                tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
        }
        rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = rsect + tmp_dev->data_offset;
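        /*
         * Worked mapping example (array parameters hypothetical): two
         * equal disks, chunk_sects = 128, request at array sector 300.
         * sect_in_chunk = 300 & 127 = 44; array chunk 300 >> 7 = 2 maps
         * to device 2 % 2 = 0 and per-device chunk 2 / 2 = 1, so
         * rsect = (1 << 7) + 0 + 44 = 172 on the first disk.
         */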
        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
        return 1;

bad_map:
        printk("raid0_make_request bug: can't convert block across chunks"
                " or bigger than %dk %llu %d\n", chunk_sects / 2,
                (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
        return 0;
}
static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
        int j, k, h;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev_to_conf(mddev);

        h = 0;
        for (j = 0; j < conf->nr_strip_zones; j++) {
                seq_printf(seq, " z%d", j);
                if (conf->hash_table[h] == conf->strip_zone+j)
                        seq_printf(seq, "(h%d)", h++);
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        seq_printf(seq, "%s/", bdevname(
                                conf->strip_zone[j].dev[k]->bdev, b));

                seq_printf(seq, "] zs=%d ds=%d s=%d\n",
                                conf->strip_zone[j].zone_start,
                                conf->strip_zone[j].dev_start,
                                conf->strip_zone[j].sectors);
        }
#endif
        seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
        return;
}
static struct mdk_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
        .size           = raid0_size,
};
static int __init raid0_init (void)
{
        return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
        unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");