/*
 * drivers/md/dm-raid1.c
 *
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)       ((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
        struct mirror_set *ms;
        uint32_t region_size;
        unsigned region_shift;

        /* holds persistent region state */
        struct dirty_log *log;

        /* hash table */
        rwlock_t hash_lock;
        mempool_t *region_pool;
        unsigned int mask;
        unsigned int nr_buckets;
        struct list_head *buckets;

        spinlock_t region_lock;
        atomic_t recovery_in_flight;
        struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
        struct list_head failed_recovered_regions;
};

enum {
        RH_CLEAN,
        RH_DIRTY,
        RH_NOSYNC,
        RH_RECOVERING
};

struct region {
        struct region_hash *rh; /* FIXME: can we get rid of this ? */
        region_t key;
        int state;

        struct list_head hash_list;
        struct list_head list;

        atomic_t pending;
        struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
        atomic_t error_count;
        struct dm_dev *dev;
        sector_t offset;
};

struct mirror_set {
        struct dm_target *ti;
        struct list_head list;
        struct region_hash rh;
        struct kcopyd_client *kcopyd_client;
        uint64_t features;

        spinlock_t lock;        /* protects the next two lists */
        struct bio_list reads;
        struct bio_list writes;

        struct dm_io_client *io_client;

        /* recovery */
        region_t nr_regions;
        int in_sync;
        int log_failure;

        struct mirror *default_mirror;  /* Default mirror */

        struct workqueue_struct *kmirrord_wq;
        struct work_struct kmirrord_work;

        unsigned int nr_mirrors;
        struct mirror mirror[0];
};

/*
 * Conversion fns
 */
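/* region_size is a power of two (see _check_region_size()), so these are exact shifts */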
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
        return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
        return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
        queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
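
/*
 * MIN_REGIONS sets the reserve of the struct region mempool;
 * MAX_RECOVERY bounds how many regions may be recovered in parallel.
 */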
#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
                   struct dirty_log *log, uint32_t region_size,
                   region_t nr_regions)
{
        unsigned int nr_buckets, max_buckets;
        size_t i;

        /*
         * Calculate a suitable number of buckets for our hash
         * table.
         */
        max_buckets = nr_regions >> 6;
        for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
                ;
        nr_buckets >>= 1;

        rh->ms = ms;
        rh->log = log;
        rh->region_size = region_size;
        rh->region_shift = ffs(region_size) - 1;
        rwlock_init(&rh->hash_lock);
        rh->mask = nr_buckets - 1;
        rh->nr_buckets = nr_buckets;

        rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
        if (!rh->buckets) {
                DMERR("unable to allocate region hash memory");
                return -ENOMEM;
        }

        for (i = 0; i < nr_buckets; i++)
                INIT_LIST_HEAD(rh->buckets + i);

        spin_lock_init(&rh->region_lock);
        sema_init(&rh->recovery_count, 0);
        atomic_set(&rh->recovery_in_flight, 0);
        INIT_LIST_HEAD(&rh->clean_regions);
        INIT_LIST_HEAD(&rh->quiesced_regions);
        INIT_LIST_HEAD(&rh->recovered_regions);
        INIT_LIST_HEAD(&rh->failed_recovered_regions);

        rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
                                                      sizeof(struct region));
        if (!rh->region_pool) {
                vfree(rh->buckets);
                rh->buckets = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void rh_exit(struct region_hash *rh)
{
        unsigned int h;
        struct region *reg, *nreg;

        BUG_ON(!list_empty(&rh->quiesced_regions));
        for (h = 0; h < rh->nr_buckets; h++) {
                list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
                        BUG_ON(atomic_read(&reg->pending));
                        mempool_free(reg, rh->region_pool);
                }
        }

        if (rh->log)
                dm_destroy_dirty_log(rh->log);
        if (rh->region_pool)
                mempool_destroy(rh->region_pool);
        vfree(rh->buckets);
}
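
/*
 * Multiplicative hash spreading region numbers across the bucket array;
 * the mask works because nr_buckets is always a power of two.
 */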
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
        return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
        struct region *reg;

        list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
                if (reg->key == region)
                        return reg;

        return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
        unsigned int h = rh_hash(rh, reg->key);
        list_add(&reg->hash_list, rh->buckets + h);
}

static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
        struct region *reg, *nreg;

        read_unlock(&rh->hash_lock);
        nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
        if (unlikely(!nreg))
                nreg = kmalloc(sizeof(struct region), GFP_NOIO);
        nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
                RH_CLEAN : RH_NOSYNC;
        nreg->rh = rh;
        nreg->key = region;

        INIT_LIST_HEAD(&nreg->list);

        atomic_set(&nreg->pending, 0);
        bio_list_init(&nreg->delayed_bios);
        write_lock_irq(&rh->hash_lock);

        reg = __rh_lookup(rh, region);
        if (reg)
                /* we lost the race */
                mempool_free(nreg, rh->region_pool);
        else {
                __rh_insert(rh, nreg);
                if (nreg->state == RH_CLEAN) {
                        spin_lock(&rh->region_lock);
                        list_add(&nreg->list, &rh->clean_regions);
                        spin_unlock(&rh->region_lock);
                }
                reg = nreg;
        }
        write_unlock_irq(&rh->hash_lock);
        read_lock(&rh->hash_lock);

        return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
        struct region *reg;

        reg = __rh_lookup(rh, region);
        if (!reg)
                reg = __rh_alloc(rh, region);

        return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
        int r;
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        if (reg)
                return reg->state;

        /*
         * The region wasn't in the hash, so we fall back to the
         * dirty log.
         */
        r = rh->log->type->in_sync(rh->log, region, may_block);

        /*
         * Any error from the dirty log (eg. -EWOULDBLOCK) gets
         * taken as a RH_NOSYNC
         */
        return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
                             region_t region, int may_block)
{
        int state = rh_state(rh, region, may_block);
        return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list))) {
                queue_bio(ms, bio, WRITE);
        }
}
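
/*
 * Called when recovery of a region has finished: record the result in
 * the dirty log, requeue any writes delayed on the region and release
 * its recovery slot.
 */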
static void complete_resync_work(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        rh->log->type->set_region_sync(rh->log, reg->key, success);
        dispatch_bios(rh->ms, &reg->delayed_bios);
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                wake_up_all(&_kmirrord_recovery_stopped);
        up(&rh->recovery_count);
}
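
/*
 * Splice the clean, recovered and failed_recovered lists out from under
 * the locks, then update the dirty log and complete resync work for
 * each region before returning it to the mempool.
 */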
static void rh_update_states(struct region_hash *rh)
{
        struct region *reg, *next;

        LIST_HEAD(clean);
        LIST_HEAD(recovered);
        LIST_HEAD(failed_recovered);

        /*
         * Quickly grab the lists.
         */
        write_lock_irq(&rh->hash_lock);
        spin_lock(&rh->region_lock);
        if (!list_empty(&rh->clean_regions)) {
                list_splice(&rh->clean_regions, &clean);
                INIT_LIST_HEAD(&rh->clean_regions);

                list_for_each_entry(reg, &clean, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->recovered_regions)) {
                list_splice(&rh->recovered_regions, &recovered);
                INIT_LIST_HEAD(&rh->recovered_regions);

                list_for_each_entry (reg, &recovered, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->failed_recovered_regions)) {
                list_splice(&rh->failed_recovered_regions, &failed_recovered);
                INIT_LIST_HEAD(&rh->failed_recovered_regions);

                list_for_each_entry(reg, &failed_recovered, list)
                        list_del(&reg->hash_list);
        }

        spin_unlock(&rh->region_lock);
        write_unlock_irq(&rh->hash_lock);

        /*
         * All the regions on the recovered and clean lists have
         * now been pulled out of the system, so no need to do
         * any more locking.
         */
        list_for_each_entry_safe (reg, next, &recovered, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                complete_resync_work(reg, 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &failed_recovered, list) {
                complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &clean, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                mempool_free(reg, rh->region_pool);
        }

        rh->log->type->flush(rh->log);
}
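
/*
 * Take a pending-io reference on a region; the first write to a clean
 * region marks it dirty in the log.
 */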
static void rh_inc(struct region_hash *rh, region_t region)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);

        spin_lock_irq(&rh->region_lock);
        atomic_inc(&reg->pending);

        if (reg->state == RH_CLEAN) {
                reg->state = RH_DIRTY;
                list_del_init(&reg->list);      /* take off the clean list */
                spin_unlock_irq(&rh->region_lock);

                rh->log->type->mark_region(rh->log, reg->key);
        } else
                spin_unlock_irq(&rh->region_lock);

        read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
        struct bio *bio;

        for (bio = bios->head; bio; bio = bio->bi_next)
                rh_inc(rh, bio_to_region(rh, bio));
}
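
/*
 * Drop a pending-io reference.  When the last one goes, a recovering
 * region is queued for recovery, a dirty region returns to the clean
 * list, and kmirrord is woken.
 */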
static void rh_dec(struct region_hash *rh, region_t region)
{
        unsigned long flags;
        struct region *reg;
        int should_wake = 0;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irqsave(&rh->region_lock, flags);
        if (atomic_dec_and_test(&reg->pending)) {
                /*
                 * There is no pending I/O for this region.
                 * We can move the region to corresponding list for next action.
                 * At this point, the region is not yet connected to any list.
                 *
                 * If the state is RH_NOSYNC, the region should be kept off
                 * from clean list.
                 * The hash entry for RH_NOSYNC will remain in memory
                 * until the region is recovered or the map is reloaded.
                 */

                /* do nothing for RH_NOSYNC */
                if (reg->state == RH_RECOVERING) {
                        list_add_tail(&reg->list, &rh->quiesced_regions);
                } else if (reg->state == RH_DIRTY) {
                        reg->state = RH_CLEAN;
                        list_add(&reg->list, &rh->clean_regions);
                }
                should_wake = 1;
        }
        spin_unlock_irqrestore(&rh->region_lock, flags);

        if (should_wake)
                wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
        int r;
        struct region *reg;
        region_t region;

        /*
         * Ask the dirty log what's next.
         */
        r = rh->log->type->get_resync_work(rh->log, &region);
        if (r <= 0)
                return r;

        /*
         * Get this region, and start it quiescing by setting the
         * recovering flag.
         */
        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irq(&rh->region_lock);
        reg->state = RH_RECOVERING;

        /* Already quiesced ? */
        if (atomic_read(&reg->pending))
                list_del_init(&reg->list);
        else
                list_move(&reg->list, &rh->quiesced_regions);

        spin_unlock_irq(&rh->region_lock);

        return 1;
}
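
/*
 * Pull resync work from the dirty log, quiescing regions until
 * MAX_RECOVERY of them are in flight.
 */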
static void rh_recovery_prepare(struct region_hash *rh)
{
        /* Extra reference to avoid race with rh_stop_recovery */
        atomic_inc(&rh->recovery_in_flight);

        while (!down_trylock(&rh->recovery_count)) {
                atomic_inc(&rh->recovery_in_flight);
                if (__rh_recovery_prepare(rh) <= 0) {
                        atomic_dec(&rh->recovery_in_flight);
                        up(&rh->recovery_count);
                        break;
                }
        }

        /* Drop the extra reference */
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
        struct region *reg = NULL;

        spin_lock_irq(&rh->region_lock);
        if (!list_empty(&rh->quiesced_regions)) {
                reg = list_entry(rh->quiesced_regions.next,
                                 struct region, list);
                list_del_init(&reg->list);      /* remove from the quiesced list */
        }
        spin_unlock_irq(&rh->region_lock);

        return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        spin_lock_irq(&rh->region_lock);
        if (success)
                list_add(&reg->list, &reg->rh->recovered_regions);
        else {
                reg->state = RH_NOSYNC;
                list_add(&reg->list, &reg->rh->failed_recovered_regions);
        }
        spin_unlock_irq(&rh->region_lock);

        wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
        return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, bio_to_region(rh, bio));
        bio_list_add(&reg->delayed_bios, bio);
        read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
        int i;

        /* wait for any recovering regions */
        for (i = 0; i < MAX_RECOVERY; i++)
                down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
        int i;

        for (i = 0; i < MAX_RECOVERY; i++)
                up(&rh->recovery_count);

        wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
        return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
        bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
                              void *context)
{
        struct region *reg = (struct region *) context;

        if (read_err)
                /* Read error means the failure of default mirror. */
                DMERR_LIMIT("Unable to read primary mirror during recovery");

        if (write_err)
                DMERR_LIMIT("Write error during recovery (error = 0x%x)",
                            write_err);

        rh_recovery_end(reg, !(read_err || write_err));
}
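
/*
 * Hand one region to kcopyd, copying it from the default mirror to all
 * the other mirrors; recovery_complete() reports the result.
 */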
static int recover(struct mirror_set *ms, struct region *reg)
{
        int r;
        unsigned int i;
        struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
        struct mirror *m;
        unsigned long flags = 0;

        /* fill in the source */
        m = ms->default_mirror;
        from.bdev = m->dev->bdev;
        from.sector = m->offset + region_to_sector(reg->rh, reg->key);
        if (reg->key == (ms->nr_regions - 1)) {
                /*
                 * The final region may be smaller than
                 * region_size.
                 */
                from.count = ms->ti->len & (reg->rh->region_size - 1);
                if (!from.count)
                        from.count = reg->rh->region_size;
        } else
                from.count = reg->rh->region_size;

        /* fill in the destinations */
        for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
                if (&ms->mirror[i] == ms->default_mirror)
                        continue;

                m = ms->mirror + i;
                dest->bdev = m->dev->bdev;
                dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
                dest->count = from.count;
                dest++;
        }

        /* hand to kcopyd */
        set_bit(KCOPYD_IGNORE_ERROR, &flags);
        r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
                        recovery_complete, reg);

        return r;
}

static void do_recovery(struct mirror_set *ms)
{
        int r;
        struct region *reg;
        struct dirty_log *log = ms->rh.log;

        /*
         * Start quiescing some regions.
         */
        rh_recovery_prepare(&ms->rh);

        /*
         * Copy any already quiesced regions.
         */
        while ((reg = rh_recovery_start(&ms->rh))) {
                r = recover(ms, reg);
                if (r)
                        rh_recovery_end(reg, 0);
        }

        /*
         * Update the in sync flag.
         */
        if (!ms->in_sync &&
            (log->type->get_sync_count(log) == ms->nr_regions)) {
                /* the sync is complete */
                dm_table_event(ms->ti->table);
                ms->in_sync = 1;
        }
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
        /* FIXME: add read balancing */
        return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
        bio->bi_bdev = m->dev->bdev;
        bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
        region_t region;
        struct bio *bio;
        struct mirror *m;

        while ((bio = bio_list_pop(reads))) {
                region = bio_to_region(&ms->rh, bio);

                /*
                 * We can only read balance if the region is in sync.
                 */
                if (rh_in_sync(&ms->rh, region, 1))
                        m = choose_mirror(ms, bio->bi_sector);
                else
                        m = ms->default_mirror;

                map_bio(ms, m, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:       increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING: delay the io until recovery completes
 * NOSYNC:     increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
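/*
 * Completion callback for the dm_io issued by do_write(); 'error' holds
 * one failure bit per destination mirror, and the write is counted as
 * up to date if at least one mirror succeeded.
 */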
static void write_callback(unsigned long error, void *context)
{
        unsigned int i;
        int uptodate = 1;
        struct bio *bio = (struct bio *) context;
        struct mirror_set *ms;

        ms = bio_get_ms(bio);
        bio_set_ms(bio, NULL);

        /*
         * NOTE: We don't decrement the pending count here,
         * instead it is done by the targets endio function.
         * This way we handle both writes to SYNC and NOSYNC
         * regions with the same code.
         */

        if (error) {
                /*
                 * only error the io if all mirrors failed.
                 * FIXME: bogus
                 */
                uptodate = 0;
                for (i = 0; i < ms->nr_mirrors; i++)
                        if (!test_bit(i, &error)) {
                                uptodate = 1;
                                break;
                        }
        }
        bio_endio(bio, bio->bi_size, 0);
}
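
/*
 * Submit the write to every mirror with a single asynchronous dm_io
 * call; write_callback() runs once all of the copies have completed.
 */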
static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct io_region io[KCOPYD_MAX_REGIONS+1];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE,
                .mem.type = DM_IO_BVEC,
                .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
        };

        for (i = 0; i < ms->nr_mirrors; i++) {
                m = ms->mirror + i;

                io[i].bdev = m->dev->bdev;
                io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
                io[i].count = bio->bi_size >> 9;
        }

        bio_set_ms(bio, ms);

        (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
        int state;
        struct bio *bio;
        struct bio_list sync, nosync, recover, *this_list = NULL;

        if (!writes->head)
                return;

        /*
         * Classify each write.
         */
        bio_list_init(&sync);
        bio_list_init(&nosync);
        bio_list_init(&recover);

        while ((bio = bio_list_pop(writes))) {
                state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
                switch (state) {
                case RH_CLEAN:
                case RH_DIRTY:
                        this_list = &sync;
                        break;

                case RH_NOSYNC:
                        this_list = &nosync;
                        break;

                case RH_RECOVERING:
                        this_list = &recover;
                        break;
                }

                bio_list_add(this_list, bio);
        }

        /*
         * Increment the pending counts for any regions that will
         * be written to (writes to recover regions are going to
         * be delayed).
         */
        rh_inc_pending(&ms->rh, &sync);
        rh_inc_pending(&ms->rh, &nosync);
        ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

        /*
         * Dispatch io.
         */
        if (unlikely(ms->log_failure))
                while ((bio = bio_list_pop(&sync)))
                        bio_endio(bio, bio->bi_size, -EIO);
        else while ((bio = bio_list_pop(&sync)))
                do_write(ms, bio);

        while ((bio = bio_list_pop(&recover)))
                rh_delay(&ms->rh, bio);

        while ((bio = bio_list_pop(&nosync))) {
                map_bio(ms, ms->default_mirror, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
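/*
 * Workqueue function: snapshot the queued bios under the lock, then
 * update region state before doing recovery and dispatching the io.
 */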
static void do_mirror(struct work_struct *work)
{
        struct mirror_set *ms = container_of(work, struct mirror_set,
                                             kmirrord_work);
        struct bio_list reads, writes;

        spin_lock(&ms->lock);
        reads = ms->reads;
        writes = ms->writes;
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        spin_unlock(&ms->lock);

        rh_update_states(&ms->rh);
        do_recovery(ms);
        do_reads(ms, &reads);
        do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
                                        uint32_t region_size,
                                        struct dm_target *ti,
                                        struct dirty_log *dl)
{
        size_t len;
        struct mirror_set *ms = NULL;

        if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
                return NULL;

        len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

        ms = kmalloc(len, GFP_KERNEL);
        if (!ms) {
                ti->error = "Cannot allocate mirror context";
                return NULL;
        }

        memset(ms, 0, len);
        spin_lock_init(&ms->lock);

        ms->ti = ti;
        ms->nr_mirrors = nr_mirrors;
        ms->nr_regions = dm_sector_div_up(ti->len, region_size);
        ms->in_sync = 0;
        ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

        ms->io_client = dm_io_client_create(DM_IO_PAGES);
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
                return NULL;
        }

        if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
                ti->error = "Error creating dirty region hash";
                kfree(ms);
                return NULL;
        }

        return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
                         unsigned int m)
{
        while (m--)
                dm_put_device(ti, ms->mirror[m].dev);

        dm_io_client_destroy(ms->io_client);
        rh_exit(&ms->rh);
        kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
        return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
                 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
                      unsigned int mirror, char **argv)
{
        unsigned long long offset;

        if (sscanf(argv[1], "%llu", &offset) != 1) {
                ti->error = "Invalid offset";
                return -EINVAL;
        }

        if (dm_get_device(ti, argv[0], offset, ti->len,
                          dm_table_get_mode(ti->table),
                          &ms->mirror[mirror].dev)) {
                ti->error = "Device lookup failure";
                return -ENXIO;
        }

        ms->mirror[mirror].offset = offset;

        return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
                                          unsigned int argc, char **argv,
                                          unsigned int *args_used)
{
        unsigned int param_count;
        struct dirty_log *dl;

        if (argc < 2) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        if (sscanf(argv[1], "%u", &param_count) != 1) {
                ti->error = "Invalid mirror log argument count";
                return NULL;
        }

        *args_used = 2 + param_count;

        if (argc < *args_used) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
        if (!dl) {
                ti->error = "Error creating mirror dirty log";
                return NULL;
        }

        if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
                ti->error = "Invalid region size";
                dm_destroy_dirty_log(dl);
                return NULL;
        }

        return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
                          unsigned *args_used)
{
        unsigned num_features;
        struct dm_target *ti = ms->ti;

        *args_used = 0;

        if (!argc)
                return 0;

        if (sscanf(argv[0], "%u", &num_features) != 1) {
                ti->error = "Invalid number of features";
                return -EINVAL;
        }

        argc--;
        argv++;
        (*args_used)++;

        if (num_features > argc) {
                ti->error = "Not enough arguments to support feature count";
                return -EINVAL;
        }

        if (!strcmp("handle_errors", argv[0]))
                ms->features |= DM_RAID1_HANDLE_ERRORS;
        else {
                ti->error = "Unrecognised feature requested";
                return -EINVAL;
        }

        (*args_used)++;

        return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
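/*
 * Example table line following the format above (hypothetical devices,
 * core log with one parameter, 1024-sector regions):
 *
 *   0 409600 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0
 */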
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        unsigned int nr_mirrors, m, args_used;
        struct mirror_set *ms;
        struct dirty_log *dl;

        dl = create_dirty_log(ti, argc, argv, &args_used);
        if (!dl)
                return -EINVAL;

        argv += args_used;
        argc -= args_used;

        if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
            nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
                ti->error = "Invalid number of mirrors";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        argv++, argc--;

        if (argc < nr_mirrors * 2) {
                ti->error = "Too few mirror arguments";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
                dm_destroy_dirty_log(dl);
                return -ENOMEM;
        }

        /* Get the mirror parameter sets */
        for (m = 0; m < nr_mirrors; m++) {
                r = get_mirror(ms, ti, m, argv);
                if (r) {
                        free_context(ms, ti, m);
                        return r;
                }
                argv += 2;
                argc -= 2;
        }

        ti->private = ms;
        ti->split_io = ms->rh.region_size;

        ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
        if (!ms->kmirrord_wq) {
                DMERR("couldn't start kmirrord");
                free_context(ms, ti, m);
                return -ENOMEM;
        }
        INIT_WORK(&ms->kmirrord_work, do_mirror);

        r = parse_features(ms, argc, argv, &args_used);
        if (r) {
                free_context(ms, ti, ms->nr_mirrors);
                return r;
        }

        argv += args_used;
        argc -= args_used;

        /*
         * Any read-balancing addition depends on the
         * DM_RAID1_HANDLE_ERRORS flag being present.
         * This is because the decision to balance depends
         * on the sync state of a region.  If the above
         * flag is not present, we ignore errors; and
         * the sync state may be inaccurate.
         */

        if (argc) {
                ti->error = "Too many mirror arguments";
                free_context(ms, ti, ms->nr_mirrors);
                return -EINVAL;
        }

        r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
        if (r) {
                destroy_workqueue(ms->kmirrord_wq);
                free_context(ms, ti, ms->nr_mirrors);
                return r;
        }

        wake(ms);
        return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        flush_workqueue(ms->kmirrord_wq);
        kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
}
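
/*
 * Queue a bio for kmirrord, waking the daemon only if the relevant
 * list was previously empty.
 */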
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
        int should_wake = 0;
        struct bio_list *bl;

        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        spin_lock(&ms->lock);
        should_wake = !(bl->head);
        bio_list_add(bl, bio);
        spin_unlock(&ms->lock);

        if (should_wake)
                wake(ms);
}

/*
 * Mirror mapping function
 */
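/*
 * Writes are always deferred to kmirrord so region state can be updated
 * first; reads are remapped inline when the dirty log says the region is
 * in sync, otherwise they are handed to the daemon as well.
 */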
static int mirror_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        int r, rw = bio_rw(bio);
        struct mirror *m;
        struct mirror_set *ms = ti->private;

        map_context->ll = bio_to_region(&ms->rh, bio);

        if (rw == WRITE) {
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        r = ms->rh.log->type->in_sync(ms->rh.log,
                                      bio_to_region(&ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return r;

        if (r == -EWOULDBLOCK)  /* FIXME: ugly */
                r = DM_MAPIO_SUBMITTED;

        /*
         * We don't want to fast track a recovery just for a read
         * ahead.  So we just let it silently fail.
         * FIXME: get rid of this.
         */
        if (!r && rw == READA)
                return -EIO;

        if (!r) {
                /* Pass this io over to the daemon */
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        m = choose_mirror(ms, bio->bi_sector);
        if (!m)
                return -EIO;

        map_bio(ms, m, bio);
        return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                         int error, union map_info *map_context)
{
        int rw = bio_rw(bio);
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        region_t region = map_context->ll;

        /*
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE)
                rh_dec(&ms->rh, region);

        return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;

        rh_stop_recovery(&ms->rh);

        /* Wait for all I/O we generated to complete */
        wait_event(_kmirrord_recovery_stopped,
                   !atomic_read(&ms->rh.recovery_in_flight));

        if (log->type->suspend && log->type->suspend(log))
                /* FIXME: need better error handling */
                DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;
        if (log->type->resume && log->type->resume(log))
                /* FIXME: need better error handling */
                DMWARN("log resume failed");
        rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
                         char *result, unsigned int maxlen)
{
        unsigned int m, sz = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%d ", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT("%s ", ms->mirror[m].dev->name);

                DMEMIT("%llu/%llu 0 ",
                        (unsigned long long)ms->rh.log->type->
                                get_sync_count(ms->rh.log),
                        (unsigned long long)ms->nr_regions);

                sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

                break;

        case STATUSTYPE_TABLE:
                sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

                DMEMIT("%d", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT(" %s %llu", ms->mirror[m].dev->name,
                                (unsigned long long)ms->mirror[m].offset);

                if (ms->features & DM_RAID1_HANDLE_ERRORS)
                        DMEMIT(" 1 handle_errors");
        }

        return 0;
}

static struct target_type mirror_target = {
        .name        = "mirror",
        .version     = {1, 0, 3},
        .module      = THIS_MODULE,
        .ctr         = mirror_ctr,
        .dtr         = mirror_dtr,
        .map         = mirror_map,
        .end_io      = mirror_end_io,
        .postsuspend = mirror_postsuspend,
        .resume      = mirror_resume,
        .status      = mirror_status,
};

static int __init dm_mirror_init(void)
{
        int r;

        r = dm_dirty_log_init();
        if (r)
                return r;

        r = dm_register_target(&mirror_target);
        if (r < 0) {
                DMERR("Failed to register mirror target");
                dm_dirty_log_exit();
        }

        return r;
}

static void __exit dm_mirror_exit(void)
{
        int r;

        r = dm_unregister_target(&mirror_target);
        if (r < 0)
                DMERR("unregister failed %d", r);

        dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");