/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define	DM_MSG_PREFIX	"region hash"
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  dm_rh_update_states() will remove them from the
 *   clean list and free them.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  dm_rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash table.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
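/*
 * Illustrative sketch (not part of the original code): the life of a
 * region as driven by a caller such as dm-raid1 through the exported
 * API below; this is a summary, not a definitive call sequence.
 *
 *	write queued	-> dm_rh_inc_pending()		CLEAN -> DIRTY
 *	write endio	-> dm_rh_dec()			DIRTY -> CLEAN
 *	resync work	-> dm_rh_recovery_prepare()	region quiesced, RECOVERING
 *	kcopyd done	-> dm_rh_recovery_end()		queued on (failed_)recovered list
 *	worker runs	-> dm_rh_update_states()	delayed bios dispatched, region freed
 */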
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule writes of delayed bios */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
				      rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
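/*
 * Worked example (illustrative, with assumed numbers): for a region_size
 * of 1024 sectors, region_shift is __ffs(1024) == 10.  With target_begin
 * at sector 0, a bio starting at sector 5000 maps to region
 * 5000 >> 10 == 4, and dm_rh_region_to_sector() maps region 4 back to
 * sector 4 << 10 == 4096, the first sector of that region.
 */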
/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()?
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	unsigned i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = __ffs(region_size);
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
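/*
 * Usage sketch (illustrative only; 'ms', 'MY_MAX_RECOVERY' and the
 * my_*() callbacks are hypothetical stand-ins for whatever the caller,
 * e.g. a mirror target, actually provides):
 *
 *	struct dm_region_hash *rh;
 *
 *	rh = dm_region_hash_create(ms, my_dispatch_bios, my_wakeup_workers,
 *				   my_wakeup_recovery_waiters, ti->begin,
 *				   MY_MAX_RECOVERY, dirty_log, region_size,
 *				   nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *	...
 *	dm_region_hash_destroy(rh);
 *
 * Note that region_size is expected to be a power of two: region_shift is
 * derived with __ffs(), and all sector <-> region conversions are shifts.
 */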
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}
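/*
 * Illustrative note: rh_hash() is a simple multiplicative hash.  The
 * region number is multiplied by the prime RH_HASH_MULT, shifted right by
 * RH_HASH_SHIFT to mix the high bits down, and masked with
 * (nr_buckets - 1), which works because nr_buckets is kept a power of two
 * in dm_region_hash_create().  For example, with 128 buckets the result
 * is always a bucket index in [0, 127].
 */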
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}
static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}
static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}
int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);
static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed.  If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}
/*
 * dm_rh_mark_nosync
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* The region hash entry should exist because the write was in-flight. */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In any of these cases, the region should not be connected to a list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
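/*
 * Write-path sketch (illustrative; this mirrors how a caller such as
 * dm-raid1 is expected to drive the API, and 'writes' is a hypothetical
 * caller-side bio list):
 *
 *	dm_rh_inc_pending(rh, &writes);		mark regions dirty, count I/O
 *	... issue the writes ...
 *	on endio: dm_rh_dec(rh, dm_rh_bio_to_region(rh, bio));
 *
 * Writes aimed at a region that is currently DM_RH_RECOVERING can be
 * parked with dm_rh_delay(); the delayed bios are handed back to the
 * caller's dispatch_bios callback once recovery of that region completes.
 */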
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for the
		 * next action.  At this point, the region is not yet
		 * connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept
		 * off the clean list.  The hash entry for DM_RH_NOSYNC will
		 * remain in memory until the region is recovered or the map
		 * is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
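/*
 * Recovery-cycle sketch (illustrative; the copy step is the caller's
 * responsibility, typically handed to kcopyd by kmirrord):
 *
 *	dm_rh_recovery_prepare(rh);			quiesce dirty regions
 *	while ((reg = dm_rh_recovery_start(rh)))	take a quiesced region
 *		... kick off the copy for reg ...
 *	on copy completion: dm_rh_recovery_end(reg, success);
 *	later, from the worker: dm_rh_update_states(rh, errors_handled);
 */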
/*
 * Returns a quiesced region ready for recovery, or NULL if none is waiting.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");