// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
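/*
 * Roughly, that sequence maps to bch_journal_read() (collect the entries into
 * a list of struct journal_replay), bch_journal_mark() (pin the buckets and
 * journal refcounts those entries reference) and bch_journal_replay()
 * (reinsert the keys through bch_btree_insert()).
 */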
static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}
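/*
 * Read one journal bucket: the bucket is read in chunks of at most
 * PAGE_SECTORS << JSET_BITS sectors, and every jset found is validated
 * (magic, size, checksum) before being copied into a struct journal_replay
 * and linked into @list in sequence order.  Returns 1 if it added entries,
 * 0 if it found none, or a negative error on allocation failure.
 */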
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
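/*
 * 2654435769 below is the 32-bit Fibonacci hashing constant (~2^32 divided by
 * the golden ratio): bch_journal_read() uses it to probe the journal buckets
 * in a scattered order, falls back to a linear scan over the buckets it
 * hasn't tried yet, and then narrows down the newest entries with the binary
 * search and reverse scan below.
 */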
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index l with value ZERO first for
			 * correctness, because the journal buckets form a
			 * circular buffer which might have wrapped around.
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
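/*
 * Buckets referenced by journalled keys must not be reused before those keys
 * are replayed; bch_journal_mark() below gives each replay entry a refcount
 * in journal.pin and bumps the pin count of every bucket a journalled key
 * points at (just as garbage collection would).
 */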
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t *p = NULL;
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}
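/*
 * bch_journal_replay() reinserts the journalled keys through the normal btree
 * insert path, in exactly the order they appear in the journal, and flags a
 * cache set error if the sequence numbers show a gap.
 */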
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
		"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}
/* Journalling */

#define journal_max_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
#define journal_min_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
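/*
 * The comparators above order btree nodes by the fifo index of the journal
 * entry they hold a reference to, i.e. by how old that entry is:
 * journal_max_cmp is used while filling the flush_btree heap with the best
 * candidates, journal_min_cmp is used to pop the node that references the
 * oldest journal entry first.
 */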
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry, best is our current candidate and is locked if non NULL:
	 */
	struct btree *b;
	int i;

	atomic_long_inc(&c->flush_write);

retry:
	spin_lock(&c->journal.lock);
	if (heap_empty(&c->flush_btree)) {
		for_each_cached_btree(b, c, i)
			if (btree_current_write(b)->journal) {
				if (!heap_full(&c->flush_btree))
					heap_add(&c->flush_btree, b,
						 journal_max_cmp);
				else if (journal_max_cmp(b,
					 heap_peek(&c->flush_btree))) {
					c->flush_btree.data[0] = b;
					heap_sift(&c->flush_btree, 0,
						  journal_max_cmp);
				}
			}

		for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
			heap_sift(&c->flush_btree, i, journal_min_cmp);
	}

	b = NULL;
	heap_pop(&c->flush_btree, b, journal_min_cmp);
	spin_unlock(&c->journal.lock);

	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			atomic_long_inc(&c->retry_flush_write);
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
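/*
 * Example: if j->seq is 100 and four entries are still pinned
 * (fifo_used(&j->pin) == 4), then entries 97..100 are not yet fully written
 * out and last_seq(j) = 100 - 4 + 1 = 97.
 */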
static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}
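/*
 * do_journal_discard() is a little state machine over ja->discard_in_flight:
 * DISCARD_READY issues a discard for ja->discard_idx (via the work item
 * above), DISCARD_IN_FLIGHT means one is outstanding, and DISCARD_DONE
 * advances discard_idx and goes back to DISCARD_READY.  With discards
 * disabled, discard_idx simply tracks last_idx.
 */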
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
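/*
 * journal_reclaim() frees up journal buckets whose entries have all made it
 * into the btree: it pops fully flushed refcounts off the front of
 * journal.pin, advances each device's last_idx past buckets that only hold
 * entries older than last_seq(), kicks off discards, and allocates the next
 * bucket to write to once blocks_free runs out.
 */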
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
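/*
 * The journal is double buffered: j->cur flips between j->w[0] and j->w[1]
 * (see bch_journal_next() above), so new keys can accumulate in one buffer
 * while the other one is being written out.
 */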
static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}
static void journal_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
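/*
 * Write-out path: journal_try_write() sets need_write and (if no journal io
 * is already in flight) closure_call()s journal_write_unlocked(), which
 * builds one bio per journal bucket pointer in c->journal.key and submits
 * them with REQ_PREFLUSH|REQ_FUA; journal_write_done() then wakes anyone
 * waiting on the entry and re-runs journal_write() for whatever accumulated
 * in the meantime.
 */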
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
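/*
 * journal_wait_for_write() returns, with c->journal.lock held, a
 * journal_write with enough room for @nkeys more keys; while there isn't
 * room it either kicks off the current write or reclaims/flushes btree
 * nodes to free journal space, and waits.
 */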
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}
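/*
 * Delayed-work handler for the journal_delay_ms batching: if the current
 * entry is still dirty when the delay expires, kick off the write.
 */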
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
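/*
 * bch_journal_meta() journals an empty keylist: useful to force a journal
 * write (which also carries the current btree root, uuid bucket and prio
 * bucket pointers) and to have @cl wait on its completion.
 */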
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
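/*
 * One-time setup: a 128-entry flush_btree heap, a JOURNAL_PIN deep pin fifo,
 * and two jset buffers of (1 << JSET_BITS) pages each for the double
 * buffered journal writes.
 */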
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
	    !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}