drivers/md/dm-region-hash.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define	DM_MSG_PREFIX	"region hash"
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule bios writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wakeup callers worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wakeup callers recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};
struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};
/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
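/*
 * Worked example (illustrative numbers only): region_size is expected
 * to be a power of two, since these conversions are pure shifts.  With
 * region_size = 1024 sectors, region_shift is ffs(1024) - 1 = 10, so a
 * bio beginning at sector 123456 of the target falls in region
 * 123456 >> 10 = 120.
 */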
void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;
	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;
	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
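/*
 * A mirror-style caller typically creates the hash roughly like this
 * (sketch only; ms, dispatch_bios, wakeup_mirrord and MAX_RECOVERY
 * stand in for the caller's own context, callbacks and tunable):
 *
 *	rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 *				   wakeup_all_recovery_waiters, ti->begin,
 *				   MAX_RECOVERY, log, region_size,
 *				   nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 */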
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	if (rh->region_pool)
		mempool_destroy(rh->region_pool);

	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
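/*
 * Multiplicative hashing: scale the region number by a large constant
 * and take bits [RH_HASH_SHIFT, RH_HASH_SHIFT + log2(nr_buckets)) of
 * the product as the bucket index.  nr_buckets is always a power of
 * two, so masking with rh->mask suffices.
 */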
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}
static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}
static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}
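/*
 * Look up a region, allocating it on demand.  Called with hash_lock
 * held for read; the lock is dropped and re-acquired around the
 * allocation, which is why __rh_alloc() repeats the lookup under the
 * write lock to catch a racing insert.
 */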
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}
int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);
static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}
/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_rw & REQ_FLUSH) {
		rh->flush_failure = 1;
		return;
	}

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In either case, the region should not be connected to any list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
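/*
 * Region state transitions, in brief (see also the header comment):
 *
 *	DM_RH_CLEAN -> DM_RH_DIRTY       first write pends (rh_inc)
 *	DM_RH_DIRTY -> DM_RH_CLEAN       last pending write done (dm_rh_dec)
 *	any         -> DM_RH_RECOVERING  chosen for resync (__rh_recovery_prepare)
 *	any         -> DM_RH_NOSYNC      a write failed (dm_rh_mark_nosync),
 *					 or a flush failed earlier (dm_rh_dec)
 */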
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_rw & REQ_FLUSH)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
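/*
 * dm_rh_inc_pending() above and dm_rh_dec() below bracket every write:
 * the caller marks each region of a write batch pending before issuing
 * the I/O and decrements from its end_io path once the write completes,
 * at which point the region can move to the clean or quiesced list.
 */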
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
/*
 * Returns any quiesced regions.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
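/*
 * Typical recovery cycle in the caller's worker thread (sketch only):
 *
 *	dm_rh_recovery_prepare(rh);
 *	while ((reg = dm_rh_recovery_start(rh)))
 *		start a kcopyd copy for the region; the copy-completion
 *		callback then calls dm_rh_recovery_end(reg, success).
 */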
/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");