// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
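
/*
 * Rough call-order sketch of the recovery path described above, as a reader
 * aid only; the real driver is run_cache_set() in super.c and its error
 * handling is omitted here:
 *
 *	LIST_HEAD(journal);
 *
 *	bch_journal_read(c, &journal);    // collect jsets from every cache device
 *	bch_journal_mark(c, &journal);    // mark/pin keys, as GC would
 *	bch_journal_replay(c, &journal);  // reinsert keys in journal order
 */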
static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);
	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);
		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
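
/*
 * Worked example of the offset arithmetic in journal_read_bucket() (the
 * numbers are assumptions, not taken from a real superblock): with 4KiB
 * blocks (block_bytes() == 4096, ca->sb.block_size == 8 sectors) and a jset
 * whose set_bytes() comes to 6000, set_blocks() rounds up to 2 blocks, so the
 * scan advances offset by 2 * 8 = 16 sectors and expects the next jset header
 * 2 * 4096 = 8192 bytes after the current one.
 */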
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)						\
	({							\
		int ret = journal_read_bucket(ca, list, b);	\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})

	struct cache *ca;
	unsigned int iter;
	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned int i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index l with ZERO first for
			 * correctness, since the journal buckets form a
			 * circular buffer which might have wrapped
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}
		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
					    l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));
		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}
		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
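
/*
 * Probe-order sketch for the golden ratio hash used in bch_journal_read()
 * above, assuming ca->sb.njournal_buckets == SB_JOURNAL_BUCKETS == 256 (an
 * assumption for illustration only): since 2654435769U % 256 == 185, the
 * loop visits
 *
 *	i : 0    1    2    3    4    5    6    7  ...
 *	l : 0  185  114   43  228  157   86   15  ...
 *
 * i.e. buckets spread roughly evenly around the ring, so a contiguous run of
 * buckets holding live journal entries is usually hit within a few reads.
 */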
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */
	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}
		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}
static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, s, i)
		if (ca->discard)
			return true;

	return false;
}
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;
	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
					n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}
		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}
		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}
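
/*
 * Worked example of the sequence check in bch_journal_replay() above
 * (made-up numbers): replaying start == 100 .. end == 105, the first list
 * entry has seq == 103. Since n == start and discard is enabled, entries
 * 100-102 were most likely discarded journal buckets and only a warning is
 * printed; without discard they are treated as genuinely missing and replay
 * fails with -EIO.
 */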
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non-NULL:
	 */
	struct btree *b, *best;
	unsigned int i;

	atomic_long_inc(&c->flush_write);

retry:
	best = NULL;

	mutex_lock(&c->bucket_lock);
	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b)
		set_btree_node_journal_flush(b);
	mutex_unlock(&c->bucket_lock);

	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			/* We raced */
			atomic_long_inc(&c->retry_flush_write);
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}
}
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
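
/*
 * Worked example of last_seq() (made-up numbers): if j->seq == 120 and
 * fifo_used(&j->pin) == 5, then entries 116..120 are still pinned and
 * last_seq(j) == 120 - 5 + 1 == 116, the oldest journal entry whose keys
 * might not yet be persisted in the btree.
 */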
static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}
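
/*
 * Informal sketch of the discard_in_flight state machine used above (a
 * reading of do_journal_discard() and journal_discard_endio(), not an
 * authoritative diagram):
 *
 *	DISCARD_READY     -- do_journal_discard() issues the bio  --> DISCARD_IN_FLIGHT
 *	DISCARD_IN_FLIGHT -- journal_discard_endio() completes    --> DISCARD_DONE
 *	DISCARD_DONE      -- do_journal_discard() advances
 *	                     discard_idx by one bucket            --> DISCARD_READY
 */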
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned int iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	if (n) {
		bkey_init(k);
		SET_KEY_PTRS(k, n);
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
	}
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
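
/*
 * Informal note on the double buffering above: the journal keeps two writes,
 * j->w[0] and j->w[1]. While one may still be in flight to disk, the other
 * (j->cur) collects new keys; bch_journal_next() flips j->cur to the idle
 * buffer, pushes a fresh refcount onto j->pin and stamps the buffer with
 * ++j->seq so last_seq() stays consistent.
 */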
static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}
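
/*
 * Worked example of the 'sectors' computation in journal_write_unlocked()
 * (assumed numbers, for illustration only): with block_bytes(c) == 4096 and
 * c->sb.block_size == 8 sectors, a jset whose set_bytes() is 10000 rounds up
 * to 3 blocks, so sectors == 3 * 8 == 24 and each journal bio is a 24-sector
 * (12KiB) REQ_PREFLUSH|REQ_FUA write to the bucket offset recorded in the key.
 */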
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight	= 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}