/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
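/*
 * Rough shape of that recovery sequence (a sketch only - the real driver is
 * run_cache_set() in super.c, which also handles errors and other setup;
 * `journal` and `op` are illustrative local names):
 *
 *	LIST_HEAD(journal);
 *
 *	bch_journal_read(c, &journal, &op);	// collect entries from all caches
 *	bch_journal_mark(c, &journal);		// mark keys, as GC would
 *	bch_journal_replay(c, &journal, &op);	// reinsert keys in journal order
 */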
static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       struct btree_op *op, unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	pr_debug("reading %llu", (uint64_t) bucket);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS * 8);

		bio_reset(bio);
		bio->bi_sector	= bucket + offset;
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= READ;
		bio->bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &op->cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &op->cl, ca);
		closure_sync(&op->cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */
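		/*
		 * Each pass through the loop below parses one jset out of the
		 * data just read: check magic, size and checksum, splice it
		 * into the seq-ordered replay list, then advance by the
		 * entry's size in blocks.
		 */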
		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(ca->set))
				return ret;

			if (bytes > left << 9)
				return ret;

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j))
				return ret;

			blocks = set_blocks(j, ca->set);

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next;

				if (j->seq < i->j.last_seq)
					goto next;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
int bch_journal_read(struct cache_set *c, struct list_head *list,
		     struct btree_op *op)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, op, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})
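	/*
	 * read_bucket(b) reads journal bucket b, marks it as seen in bitmap,
	 * and evaluates to nonzero when the bucket contained valid journal
	 * entries - that result is what the probing loops below key off.
	 */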
	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
		/* Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
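		/*
		 * 2654435769 is 2^32 / phi (Knuth's multiplicative hashing
		 * constant), so successive values of i probe the journal
		 * buckets in a spread-out, deterministic order instead of
		 * sequentially.
		 */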
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}
		/* If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = 0; l < ca->sb.njournal_buckets; l++) {
			if (test_bit(l, bitmap))
				continue;

			if (read_bucket(l))
				goto bsearch;
		}
bsearch:
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);
		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}
		/* Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up");
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}
		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;
			}
	}
	c->journal.seq = list_entry(list->prev,
				    struct journal_replay,
				    list)->j.seq;

	return 0;
#undef read_bucket
}
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */
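	/*
	 * Roughly: each fifo entry in journal.pin is a refcount for one
	 * journal entry that may still have unflushed keys; replayed entries
	 * that fall inside the fifo get pinned (count 1) so journal_reclaim()
	 * won't reuse their buckets until the keys have been written back.
	 */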
	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}
		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}
		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}
int bch_journal_replay(struct cache_set *s, struct list_head *list,
		       struct btree_op *op)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq)
			pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
			       n, i->j.seq - 1, start, end);
		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(op->keys.top, k);
			bch_keylist_push(&op->keys);

			op->journal = i->pin;
			atomic_inc(op->journal);

			ret = bch_btree_insert(op, s);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&op->keys));
			keys++;
		}
		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);

	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}
err:
	closure_sync(&op->cl);
	return ret;
}
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non NULL:
	 */
	struct btree *b, *best = NULL;
	unsigned iter;

	for_each_cached_btree(b, c, iter) {
		if (!down_write_trylock(&b->lock))
			continue;

		if (!btree_node_dirty(b) ||
		    !btree_current_write(b)->journal) {
			rw_unlock(true, b);
			continue;
		}

		if (!best)
			best = b;
		else if (journal_pin_cmp(c,
					 btree_current_write(best),
					 btree_current_write(b))) {
			rw_unlock(true, best);
			best = b;
		} else
			rw_unlock(true, b);
	}

	if (best)
		goto out;

	/* We can't find the best btree node, just pick the first */
	list_for_each_entry(b, &c->btree_cache, list)
		if (!b->level && btree_node_dirty(b)) {
			best = b;
			rw_lock(true, best, best->level);
			goto found;
		}
out:
	if (!best)
		return;
found:
	if (btree_node_dirty(best))
		bch_btree_node_write(best, NULL);
	rw_unlock(true, best);
}
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
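/*
 * journal.pin holds one refcount per open journal entry, newest at the back;
 * e.g. with (j)->seq == 12 and fifo_used() == 3 the open entries are 10, 11
 * and 12, so last_seq() == 10.
 */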
static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_sector		= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_size		= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		return;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;

	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
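	/*
	 * (last_seq() is j->seq - fifo_used(&j->pin) + 1, so pushing the new
	 * pin and bumping j->seq together keeps the two in lockstep.)
	 */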
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io.cl);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io.cl);
	struct cache_set *c = container_of(j, struct cache_set, journal);

	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);

	if (c->journal_delay_ms)
		closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));

	continue_at(cl, journal_write, system_wq);
}
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}
	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(c);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);
	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
static void __journal_try_write(struct cache_set *c, bool noflush)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io.cl;

	if (!closure_trylock(cl, &c->cl))
		spin_unlock(&c->journal.lock);
	else if (noflush && journal_full(&c->journal)) {
		spin_unlock(&c->journal.lock);
		continue_at(cl, journal_write, system_wq);
	} else
		journal_write_unlocked(cl);
}
#define journal_try_write(c)	__journal_try_write(c, false)
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct journal_write *w;

	if (CACHE_SYNC(&c->sb)) {
		spin_lock(&c->journal.lock);

		w = c->journal.cur;
		w->need_write = true;

		if (cl)
			BUG_ON(!closure_wait(&w->wait, cl));

		__journal_try_write(c, true);
	}
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
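/*
 * Illustrative caller shape only (the real callers live in request.c and
 * btree.c and are more involved); op->keys is filled with bch_keylist_add()
 * or equivalent, then the closure is continued here:
 *
 *	bch_keylist_add(&op->keys, k);
 *	continue_at(cl, bch_journal, bcache_wq);
 */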
void bch_journal(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct cache_set *c = op->c;
	struct journal_write *w;
	size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;

	if (op->type != BTREE_INSERT ||
	    !CACHE_SYNC(&c->sb))
		goto out;

	/*
	 * If we're looping because we errored, might already be waiting on
	 * another journal write:
	 */
	while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
		closure_sync(cl->parent);
	spin_lock(&c->journal.lock);

	if (journal_full(&c->journal)) {
		trace_bcache_journal_full(c);

		closure_wait(&c->journal.wait, cl);

		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, bch_journal, bcache_wq);
	}
	w = c->journal.cur;
	w->need_write = true;
	b = __set_blocks(w->data, w->data->keys + n, c);

	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
	    b > c->journal.blocks_free) {
		trace_bcache_journal_entry_full(c);

		/*
		 * XXX: If we were inserting so many keys that they won't fit in
		 * an _empty_ journal write, we'll deadlock. For now, handle
		 * this in bch_keylist_realloc() - but something to think about.
		 */
		BUG_ON(!w->data->keys);

		BUG_ON(!closure_wait(&w->wait, cl));

		closure_flush(&c->journal.io);

		journal_try_write(c);
		continue_at(cl, bch_journal, bcache_wq);
	}
	memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
	w->data->keys += n;

	op->journal = &fifo_back(&c->journal.pin);
	atomic_inc(op->journal);

	if (op->flush_journal) {
		closure_flush(&c->journal.io);
		closure_wait(&w->wait, cl->parent);
	}

	journal_try_write(c);
out:
	bch_btree_insert_async(cl);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}