// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
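/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * recovery order described above, as run_cache_set() drives it. It assumes
 * the declarations of the three functions defined below are visible.
 */
static inline int example_journal_recovery(struct cache_set *c,
					   struct list_head *journal)
{
	int ret = bch_journal_read(c, journal);	/* 1) read journal entries */

	if (ret)
		return ret;

	bch_journal_mark(c, journal);		/* 2) mark keys, as GC would */
	return bch_journal_replay(c, journal);	/* 3) replay in journal order */
}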
static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	/* wake up the closure waiting in journal_read_bucket() */
	closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u\n", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);
		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic\n", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca));
			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq, the node on head has the smallest (oldest)
			 * journal seq, the node on tail has the biggest
			 * (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in list is expired and useless, remove it from
			 * this list. Otherwise, j is a candidate jset for
			 * further following checks.
			 */
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in list, j is an expired and useless jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next_set;

				/*
				 * 'where' points to the first jset in list
				 * which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
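/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * invariant documented above, i.e. the replay list built by
 * journal_read_bucket() stays sorted by j.seq, with the oldest entry at the
 * head and the newest at the tail.
 */
static inline bool example_replay_list_is_sorted(struct list_head *list)
{
	struct journal_replay *i;
	uint64_t prev = 0;

	list_for_each_entry(i, list, list) {
		if (i->j.seq < prev)
			return false;
		prev = i->j.seq;
	}

	return true;
}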
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		ret = journal_read_bucket(ca, list, b);			\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca = c->cache;
	int ret = 0;
	struct journal_device *ja = &ca->journal;
	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
	unsigned int i, l, r, m;
	uint64_t seq;

	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 * (see the illustrative sketch after this function).
	 */
	for (i = 0; i < ca->sb.njournal_buckets; i++) {
		/*
		 * We must try index l == 0 first for correctness: the
		 * journal buckets form a circular buffer which might have
		 * wrapped around.
		 */
		l = (i * 2654435769U) % ca->sb.njournal_buckets;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search\n");

	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ca->sb.njournal_buckets)
		goto out;
bsearch:
	BUG_ON(list_empty(list));

	/* Binary search */
	m = l;
	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
	pr_debug("starting binary search, l %u r %u\n", l, r);

	while (l + 1 < r) {
		seq = list_entry(list->prev, struct journal_replay,
				 list)->j.seq;

		m = (l + r) >> 1;
		read_bucket(m);

		if (seq != list_entry(list->prev, struct journal_replay,
				      list)->j.seq)
			l = m;
		else
			r = m;
	}

	/*
	 * Read buckets in reverse order until we stop finding more
	 * journal entries
	 */
	pr_debug("finishing up: m %u njournal_buckets %u\n",
		 m, ca->sb.njournal_buckets);
	l = m;

	while (1) {
		if (!l--)
			l = ca->sb.njournal_buckets - 1;

		if (l == m)
			break;

		if (test_bit(l, bitmap))
			continue;

		if (!read_bucket(l))
			break;
	}
	seq = 0;

	for (i = 0; i < ca->sb.njournal_buckets; i++)
		if (ja->seq[i] > seq) {
			seq = ja->seq[i];
			/*
			 * When journal_reclaim() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			ja->last_idx = ja->discard_idx = (i + 1) %
				ca->sb.njournal_buckets;
		}
out:
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
}
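/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * golden-ratio probe order used in bch_journal_read() above. The constant
 * 2654435769 is roughly 2^32 divided by the golden ratio, so consecutive
 * values of i map to bucket indices spread evenly around the ring, which
 * tends to hit a run of buckets with valid journal entries quickly.
 */
static inline unsigned int example_golden_ratio_probe(unsigned int i,
						      unsigned int njournal_buckets)
{
	return (i * 2654435769U) % njournal_buckets;
}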
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t *p;
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */
	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}
static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}
static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr;
	int ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (ref_nr <= 0) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need an exactly
		 * accurate value, just to check whether the front pointer
		 * of c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache anymore; just quit the loop and
		 * flush out what we already have.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!\n");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed to by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   the next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, only scan
		 * for BTREE_FLUSH_NR matched btree nodes at most. If more
		 * btree nodes reference the oldest journal entry, try to
		 * flush them the next time btree_flush_write() is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);
	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL\n", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others\n", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others\n", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}
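/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * distance test used in the scan loop above. journal.pin is a power-of-two
 * ring of atomic_t, so the slot distance between a btree node's journal
 * reference and the current fifo front can be reduced with the ring mask;
 * a distance of zero means the node references exactly the oldest journal
 * entry.
 */
static inline bool example_refs_oldest_journal_entry(atomic_t *journal_ref,
						     atomic_t *fifo_front_p,
						     size_t mask)
{
	return ((journal_ref - fifo_front_p) & mask) == 0;
}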
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
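/*
 * Worked example (illustrative only, not used by the driver): each pinned
 * entry in j->pin corresponds to one journal seq still needed on disk, the
 * newest being j->seq. With j->seq == 100 and fifo_used(&j->pin) == 4,
 * last_seq(j) == 100 - 4 + 1 == 97, i.e. entries 97..100 must be preserved.
 */
static inline uint64_t example_last_seq(uint64_t newest_seq, size_t pinned)
{
	return newest_seq - pinned + 1;
}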
static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		fallthrough;

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca = c->cache;
	uint64_t last_seq;
	unsigned int next;
	struct journal_device *ja = &ca->journal;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) %
			ca->sb.njournal_buckets;

	do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
	/* No space available on this device */
	if (next == ja->discard_idx)
		goto out;

	ja->cur_idx = next;
	k->ptr[0] = MAKE_PTR(0,
			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
			     ca->sb.nr_this_dev);
	atomic_long_inc(&c->reclaimed_journal_buckets);

	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}
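/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * double-buffering that bch_journal_next() implements. The journal keeps two
 * in-memory writes, j->w[0] and j->w[1]; while one is being filled the other
 * can be in flight, and bch_journal_next() simply flips between them.
 */
static inline struct journal_write *example_next_write(struct journal *j)
{
	return (j->cur == &j->w[0]) ? &j->w[1] : &j->w[0];
}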
static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *cl);
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		ca->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);
	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
	w->data->magic		= jset_magic(&ca->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);
	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}
	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;
	struct cache *ca = c->cache;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(ca)) * ca->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * ca->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->cache->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
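/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * journal a keylist and then drop the returned journal pin reference once the
 * keys have been inserted into the btree, mirroring what bch_journal_meta()
 * below does for an empty keylist.
 */
static inline void example_journal_then_release(struct cache_set *c,
						struct keylist *keys,
						struct closure *parent)
{
	atomic_t *ref = bch_journal(c, keys, parent);

	/* ... the caller would insert 'keys' into the btree here ... */

	if (ref)
		atomic_dec_bug(ref);
}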
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
		return -ENOMEM;

	return 0;
}