// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);
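
	/*
	 * Read the bucket in chunks: each pass reads at most
	 * PAGE_SECTORS << JSET_BITS sectors, i.e. the size of the
	 * preallocated jset buffer pointed to by 'data'.
	 */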
	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/*
		 * This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq: the node at the head has the smallest
			 * (oldest) journal seq, the node at the tail has the
			 * biggest (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in the list is expired and useless, remove it from
			 * the list. Otherwise, j is a candidate jset for
			 * the following checks.
			 */
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in the list, j is an expired and useless jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next;

				/*
				 * 'where' points to the first jset in the list
				 * which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;

add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		ret = journal_read_bucket(ca, list, b);			\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})
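
	/*
	 * read_bucket() reads journal bucket 'b' and marks it as checked in
	 * 'bitmap'; read errors abort bch_journal_read(), and otherwise the
	 * macro evaluates to nonzero iff the bucket contained journal
	 * entries.
	 */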

	struct cache *ca;
	unsigned int iter;
	int ret = 0;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned int i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try the index l with ZERO first for
			 * correctness, because the journal buckets form a
			 * circular buffer which might have wrapped around.
			 */
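			/*
			 * 2654435769 is 2^32 divided by the golden ratio,
			 * rounded - the classic Fibonacci-hashing multiplier -
			 * so successive values of i probe the buckets in a
			 * spread-out, pseudo-random order rather than
			 * sequentially.
			 */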
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * yet:
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
					    l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (read_bucket(l))
				m = l;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;

			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p;
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */
	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}
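
		/*
		 * Entries that survive the last_seq cut-off get a pin
		 * refcount of 1 here; bch_journal_replay() later hands that
		 * pin to the btree insert path, so the journal entry stays
		 * pinned until its keys are safely back in the btree.
		 */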

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, s, i)
		if (ca->discard)
			return true;

	return false;
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
					n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}
406 pr_info("journal replay done, %i keys in %i entries, seq %llu",
409 while (!list_empty(list
)) {
410 i
= list_first_entry(list
, struct journal_replay
, list
);

#define nr_to_fifo_front(p, front_p, mask)	(((p) - (front_p)) & (mask))
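/*
 * nr_to_fifo_front() computes how many entries ahead of the fifo front
 * pointer 'front_p' the entry 'p' sits, using the fifo's power-of-two
 * 'mask' to handle wrap-around. A result of 0 means 'p' is the front
 * entry itself, i.e. the oldest live journal entry.
 */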

static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr, ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;
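
	/*
	 * Overall flow: find up to BTREE_FLUSH_NR dirty btree nodes whose
	 * pending write still references the oldest journal entry (the fifo
	 * front), mark them with the journal_flush bit, and write them out
	 * so that the oldest entry's pin count can drop and its journal
	 * space can be reclaimed.
	 */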

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (!ref_nr) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need to know
		 * the exact value, just whether the front pointer of
		 * c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache any more, just quit the loop and
		 * flush out what we already have.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed to by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (nr_to_fifo_front(btree_current_write(b)->journal,
				     fifo_front_p,
				     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, scan for at
		 * most BTREE_FLUSH_NR matching btree nodes. If more btree
		 * nodes reference the oldest journal entry, try to flush
		 * them the next time btree_flush_write() is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);

	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
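/*
 * (j)->seq is the sequence number of the newest open journal entry and the
 * pin fifo holds one refcount per entry that is still open, so last_seq()
 * is the sequence number of the oldest entry that must still be kept;
 * e.g. seq == 10 with 3 pinned entries gives last_seq() == 8.
 */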

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}
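
	/*
	 * discard_in_flight is a tiny state machine: DISCARD_READY means the
	 * next journal bucket may be discarded, DISCARD_IN_FLIGHT means a
	 * discard bio has been submitted, and DISCARD_DONE (set in
	 * journal_discard_endio()) means it completed and discard_idx can
	 * advance.
	 */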
	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;
	}

	atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

	bio_init(bio, bio->bi_inline_vecs, 1);
	bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
	bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
					ca->sb.d[ja->discard_idx]);
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= bucket_bytes(ca);
	bio->bi_end_io		= journal_discard_endio;

	closure_get(&ca->set->cl);
	INIT_WORK(&ja->discard_work, journal_discard_work);
	queue_work(bch_journal_wq, &ja->discard_work);
}

static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned int iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
		atomic_long_inc(&c->reclaimed_journal_buckets);
	}

	if (n) {
		bkey_init(k);
		SET_KEY_PTRS(k, n);
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
	}
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];
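
	/*
	 * The journal keeps two write buffers, j->w[0] and j->w[1]; switching
	 * j->cur here lets new keys accumulate in one buffer while the other
	 * is still being written out.
	 */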

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight	= 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;
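
		/*
		 * The new keys fit only if the resulting jset fits both in
		 * the journal blocks still free on disk and in the in-memory
		 * write buffer, which is PAGE_SECTORS << JSET_BITS sectors:
		 */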
		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);
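
	/*
	 * 'ret' is the pin refcount of the journal entry now holding these
	 * keys; the caller keeps it elevated so the entry cannot be reclaimed
	 * until the keys have also been written into the btree.
	 */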

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}