// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
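/*
 * A rough sketch of the recovery sequence described above, as run_cache_set()
 * drives it (order inferred from the comment above and the functions below,
 * not an exhaustive call list):
 *
 *	LIST_HEAD(journal);
 *	bch_journal_read(c, &journal);    // collect journal_replay entries
 *	bch_journal_mark(c, &journal);    // mark keys, take journal/bucket pins
 *	bch_journal_replay(c, &journal);  // reinsert keys in journal order
 */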
static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
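/*
 * Worked example of the scan above, with illustrative geometry (not taken
 * from a real superblock): with a 512-sector bucket and an 8-sector (4 KiB)
 * block, a jset that set_blocks() says spans 3 blocks advances offset by
 * 3 * 8 = 24 sectors, and the next jset header is expected immediately after
 * it; a bad magic or checksum there ends the scan of this bucket, and a jset
 * that extends past the data read so far triggers a reread at the new offset.
 */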
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try the index l with ZERO first for
			 * correctness due to the scenario that the journal
			 * bucket is circular buffer which might have wrapped
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}
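		/*
		 * Note on the probe order above: 2654435769 is roughly
		 * 2^32 / golden ratio, so successive values of i scatter l
		 * pseudo-randomly across the journal buckets (Fibonacci
		 * hashing), while i == 0 always maps to bucket 0 - the
		 * "try ZERO first" requirement from the comment above.
		 */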
		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
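/*
 * Sketch of how the per-device ring indices relate after the setup above
 * (a reading of the code, not an authoritative invariant list): cur_idx is
 * the bucket currently being written, last_idx trails it as the oldest
 * bucket still holding entries newer than last_seq, and discard_idx trails
 * last_idx as the next reclaimed bucket awaiting a discard.  journal_reclaim()
 * advances cur_idx forward for new writes and refuses to advance it into
 * discard_idx, so writes never land in a bucket that has not been discarded
 * yet.
 */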
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}
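/*
 * The atomic_inc() on PTR_BUCKET(...)->pin above is (as far as this code
 * shows) what keeps buckets referenced by journalled keys from being reused
 * by the allocator before bch_journal_replay() has reinserted those keys;
 * the journal pin taken just before it plays the same role for the journal
 * entry itself.
 */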
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}
#define journal_max_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
#define journal_min_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non NULL:
	 */
	struct btree *b;
	int i;

	atomic_long_inc(&c->flush_write);

retry:
	spin_lock(&c->journal.lock);
	if (heap_empty(&c->flush_btree)) {
		for_each_cached_btree(b, c, i)
			if (btree_current_write(b)->journal) {
				if (!heap_full(&c->flush_btree))
					heap_add(&c->flush_btree, b,
						 journal_max_cmp);
				else if (journal_max_cmp(b,
					 heap_peek(&c->flush_btree))) {
					c->flush_btree.data[0] = b;
					heap_sift(&c->flush_btree, 0,
						  journal_max_cmp);
				}
			}

		for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
			heap_sift(&c->flush_btree, i, journal_min_cmp);
	}

	b = NULL;
	heap_pop(&c->flush_btree, b, journal_min_cmp);
	spin_unlock(&c->journal.lock);

	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			atomic_long_inc(&c->retry_flush_write);
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
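/*
 * Worked example (illustrative numbers): if j->seq == 100 and the pin fifo
 * currently holds 3 refcounts, last_seq(j) == 98 - i.e. journal entries
 * 98, 99 and 100 may still be referenced by dirty btree nodes, and only
 * entries older than 98 are safe to reclaim.
 */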
static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
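/*
 * State machine summary (read off the switch above): DISCARD_READY means the
 * bucket at discard_idx may be discarded; submitting the discard moves us to
 * DISCARD_IN_FLIGHT, journal_discard_endio() moves us to DISCARD_DONE, and
 * the next call advances discard_idx and returns to DISCARD_READY.
 * Discarding stops once discard_idx catches up with last_idx, and when
 * discards are disabled discard_idx simply tracks last_idx.
 */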
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
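/*
 * Space accounting example with illustrative geometry: c->block_bits is
 * ilog2 of the block size in sectors, so with a 1024-sector (512 KiB) bucket
 * and an 8-sector (4 KiB) block, blocks_free becomes 1024 >> 3 == 128
 * journal blocks per freshly allocated bucket.
 */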
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
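/*
 * Lifecycle of the pin pushed above, as far as this file shows: it starts at
 * 1 for the newly opened entry, bch_journal() hands callers a pointer to it
 * so btree inserts can keep the entry pinned, journal_write_unlocked() drops
 * that initial reference when it seals the entry for writing, and
 * journal_reclaim() pops zeroed pins off the front of the fifo.
 */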
static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}
static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}
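/*
 * Example of the fit check above with illustrative geometry (4 KiB pages,
 * 8-sector blocks, JSET_BITS == 3): __set_blocks() says how many journal
 * blocks the current jset would occupy with nkeys more u64s appended; times
 * block_size that is the write size in sectors.  It must fit both the space
 * left in the current journal bucket (blocks_free * 8 sectors) and the
 * in-memory jset buffer, PAGE_SECTORS << JSET_BITS == 8 << 3 == 64 sectors
 * (32 KiB) per write.
 */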
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
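/*
 * Typical caller pattern, as suggested by bch_journal_meta() below and the
 * comment above (not the only valid usage): treat the returned atomic_t as a
 * pin on the journal entry, pass it to bch_btree_insert() so dirty btree
 * nodes keep the entry alive, and drop it with atomic_dec_bug() once the
 * keys are safely in the btree.
 */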
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
	    !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}
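/*
 * Sizing note (assuming 4 KiB pages and JSET_BITS == 3): each of the two
 * journal_write buffers is an order-3 page allocation, i.e. 8 pages or
 * 32 KiB, which matches the PAGE_SECTORS << JSET_BITS cap used when reading
 * buckets and when checking whether a jset fits in a single write.
 */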