/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= READ;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io		= journal_read_endio;
		bio->bi_private		= &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}
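
			/*
			 * The set at j extends past the data we've read into
			 * the buffer so far; jump back and reread starting at
			 * this offset (the size checks above guarantee it can
			 * fit in the buffer).
			 */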
			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));
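
			/*
			 * The replay list is kept sorted by sequence number:
			 * entries older than this set's last_seq are already
			 * in the btree and can be dropped, then we walk
			 * backwards to find where this set belongs, skipping
			 * it if we already have a copy or if a newer entry
			 * makes it obsolete.
			 */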
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);
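
		/*
		 * Invariant: bucket l is known to hold journal entries and
		 * bucket r has already been read.  Each pass reads the
		 * midpoint m; if it adds entries newer than anything on the
		 * list, the most recently written bucket is at or after m,
		 * so move l up, otherwise move r down.
		 */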
		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;

			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;
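
		/*
		 * Push zero-refcount placeholder pins for any sequence
		 * numbers between the newest seq seen so far and this
		 * entry's seq, so positions in the pin fifo stay in step
		 * with journal sequence numbers.
		 */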
		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}
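
		/*
		 * Bump the pin count on the buckets each valid key points
		 * into and mark the keys, so those buckets can't be reused
		 * before the keys have been replayed into the btree.
		 */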
		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
				 "bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}
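
	/*
	 * Discards run asynchronously, one bucket at a time:
	 * DISCARD_READY means the next reclaimed bucket may be discarded,
	 * DISCARD_IN_FLIGHT means a discard bio is outstanding, and
	 * DISCARD_DONE means it completed and discard_idx can advance.
	 */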
	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
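
/*
 * Free up journal space: pop pins whose refcount has hit zero (all of
 * their keys are now in the btree), advance each device's last_idx past
 * buckets that only contain flushed entries, issue discards for them,
 * and if the journal has run out of space allocate the next bucket to
 * write to on each device.
 */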
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
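
/*
 * Switch to the other in-memory journal_write buffer and open a new
 * journal entry with the next sequence number, pushing a fresh pin
 * (initial refcount 1) for it.
 */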
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);
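
/*
 * Runs when a journal write completes: bch_journal_next() has already
 * advanced j->cur, so the write that just finished is the buffer cur no
 * longer points to.  Wake up its waiters and re-run journal_write() in
 * case the other buffer already needs writing.
 */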
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
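
/*
 * Fill in and submit the current journal entry: record the btree root,
 * uuid and prio bucket pointers, checksum the jset, and issue one write
 * per pointer in journal.key (one per cache device), then open the next
 * entry.  Called with journal.lock held; drops it.
 */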
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
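
/*
 * Mark the current entry as needing a write and start a journal write
 * closure if none is in flight; only one runs at a time, and a running
 * one will pick this entry up via journal_write_done().  Called with
 * journal.lock held; drops it.
 */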
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
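
/*
 * Block until the current journal entry has room for nkeys more keys,
 * writing out the current entry and/or reclaiming journal space as
 * needed.  Returns the entry to add keys to, with journal.lock held.
 */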
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);
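
	/*
	 * If the caller wants to wait for this entry to hit disk, start a
	 * write now; otherwise mark it dirty and, if it wasn't already,
	 * arm the delayed flush so it goes out within journal_delay_ms.
	 */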
	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
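
/*
 * Journal an empty keylist, so that the metadata carried in every
 * journal entry (such as the current btree root) gets written out.
 */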
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}