/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is
 * a smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

/* Bucket heap / gen */
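
/*
 * bch_inc_gen() bumps a bucket's generation, which invalidates any existing
 * btree pointers into it, and records how far the in-memory gen has drifted
 * from the gen gc last saw and from the gen last written to disk, so gc and
 * prio writes happen before either can wrap.
 */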
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	if (CACHE_SYNC(&ca->set->sb)) {
		ca->need_save_prio = max(ca->need_save_prio,
					 bucket_disk_gen(b));
		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
	}

	return ret;
}
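
/*
 * Prio decay: each time enough sectors of IO have accumulated (tracked in
 * c->rescale), walk every bucket and decrement the prio of unpinned data
 * buckets, so buckets that haven't been used recently drift toward being
 * invalidated first.
 */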
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/* Allocation */
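
/*
 * A bucket's gen can only be bumped if doing so won't wrap it too far past
 * the gen gc last saw or the gen last written to disk.
 */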
static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}
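
/*
 * Put a completely empty bucket on the unused list, pinned so it isn't reused
 * until the allocator thread picks it up. Under FIFO replacement we only
 * bother if there's still room on one of the free lists.
 */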
bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));

	if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
		unsigned i;

		for (i = 0; i < RESERVE_NONE; i++)
			if (!fifo_full(&ca->free[i]))
				goto add;

		return false;
	}
add:
	b->prio = 0;

	if (can_inc_bucket_gen(b) &&
	    fifo_push(&ca->unused, b - ca->buckets)) {
		atomic_inc(&b->pin);
		return true;
	}

	return false;
}

static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}
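
/*
 * Invalidate a bucket in memory: bump its gen so existing pointers become
 * stale, pin it, and queue it on free_inc so its new gen gets written out.
 */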
static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale bucket
 * prios to a common baseline.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
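
/*
 * LRU-ish invalidation: collect reclaimable buckets in a heap keyed by
 * bucket_prio() and invalidate the lowest priority ones until free_inc is
 * full. Buckets with no live data go straight onto the unused list; if we run
 * out of candidates, ask garbage collection for more.
 */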
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		/*
		 * If we fill up the unused list, and we then return before
		 * adding anything to the free_inc list, we'll skip writing
		 * prios/gens and just go back to allocating from the unused
		 * list:
		 */
		if (fifo_full(&ca->unused))
			return;

		if (!can_invalidate_bucket(ca, b))
			continue;

		if (!GC_SECTORS_USED(b) &&
		    bch_bucket_add_unused(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		invalidate_one_bucket(ca, b);
	}
}
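
/*
 * FIFO invalidation: sweep the buckets in order starting from
 * fifo_last_bucket, invalidating whatever is reclaimable; if a full pass
 * doesn't fill free_inc, ask garbage collection for more.
 */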
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}
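
/*
 * Random invalidation: probe buckets at random and invalidate the reclaimable
 * ones; give up and ask for gc after nbuckets / 2 probes.
 */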
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	if (ca->invalidate_needs_gc)
		return;

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}

	trace_bcache_alloc_invalidate(ca);
}
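
/*
 * Wait (freezably) for @cond to become true, dropping bucket_lock while
 * sleeping. The bare "return 0" means this can only be used from
 * bch_allocator_thread(), which it causes to exit if the kthread is stopped.
 */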
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
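
/*
 * Hand a freed bucket to whichever reserve needs it most: the prio reserve is
 * topped up first, then the remaining reserves in order. Returns false if
 * every freelist is full.
 */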
static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}
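
/*
 * Main allocator loop: move buckets from the unused and free_inc lists onto
 * the per-reserve free lists (issuing discards if enabled), then invalidate
 * more buckets and, for synchronous caches, write the new prios/gens to disk
 * before they can be reused.
 */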
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if ((!atomic_read(&ca->set->prio_blocked) ||
			     !CACHE_SYNC(&ca->set->sb)) &&
			    !fifo_empty(&ca->unused))
				fifo_pop(&ca->unused, bucket);
			else if (!fifo_empty(&ca->free_inc))
				fifo_pop(&ca->free_inc, bucket);
			else
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

		allocator_wait(ca, ca->set->gc_mark_valid &&
			       (ca->need_save_prio > 64 ||
				!ca->invalidate_needs_gc));
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb) &&
		    (!fifo_empty(&ca->free_inc) ||
		     ca->need_save_prio > 64))
			bch_prio_write(ca);
	}
}
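
/*
 * Allocate a single bucket from @ca, preferring the general RESERVE_NONE
 * freelist and falling back to @reserve. If @wait is set, sleep until the
 * allocator thread refills a freelist; otherwise fail with -1.
 */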
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait)
		return -1;

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	wake_up_process(ca->alloc_thread);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
		fifo_for_each(i, &ca->unused, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	return r;
}
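
/*
 * Release every bucket @k points to: mark them reclaimable with no live
 * sectors and try to put them back on the unused list.
 */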
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bucket *b = PTR_BUCKET(c, k, i);

		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_SECTORS_USED(b, 0);
		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
	}
}
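
/*
 * Allocate one bucket from each of the first @n caches in c->cache_by_alloc
 * and build @k with a pointer to each; on failure the partially built key is
 * released and -1 is returned. Caller must hold bucket_lock.
 */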
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;
	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is if you've got multiple tasks pulling data into the cache at the
 * same time, you'll get better cache utilization if you try to segregate their
 * data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */
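
/*
 * The open_bucket structs themselves are allocated up front (six of them) by
 * bch_open_buckets_alloc() and freed by bch_open_buckets_free();
 * pick_data_bucket() only ever recycles entries on c->data_buckets rather
 * than allocating new ones.
 */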
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}
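
/*
 * Spawn the per-cache allocator thread; kthread_run() starts it immediately,
 * and bch_bucket_alloc() wakes it via ca->alloc_thread after popping buckets
 * off the freelists.
 */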
int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}

int bch_cache_allocator_init(struct cache *ca)
{
	/*
	 * Reserve:
	 * Prio/gen writes first
	 * Then 8 for btree allocations
	 * Then half for the moving garbage collector
	 */
#if 0
	ca->watermark[WATERMARK_PRIO] = 0;

	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);

	ca->watermark[WATERMARK_MOVINGGC] = 8 +
		ca->watermark[WATERMARK_METADATA];

	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
		ca->watermark[WATERMARK_MOVINGGC];
#endif
	return 0;
}