// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}
unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}
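
/*
 * Recompute the journal watermark: if clean space, the pin fifo or the btree
 * write buffer is running low, allocations switch from BCH_WATERMARK_stripe
 * to BCH_WATERMARK_reclaim so that operations needed for reclaim can still
 * make forward progress.
 */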
void bch2_journal_set_watermark(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool low_on_space = j->space[journal_space_clean].total * 4 <=
		j->space[journal_space_total].total;
	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
	unsigned watermark = low_on_space || low_on_pin || low_on_wb
		? BCH_WATERMARK_reclaim
		: BCH_WATERMARK_stripe;

	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
		trace_and_count(c, journal_full, c);

	mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);

	swap(watermark, j->watermark);
	if (watermark > j->watermark)
		journal_wake(j);
}
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}
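
/*
 * Compute journal space available across the filesystem for a given measure
 * (@from): per-device free space is collected and insertion-sorted largest
 * to smallest, and the result is the space on the @nr_devs_want'th largest
 * device - i.e. what we can write with the requested degree of replication.
 */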
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
			    enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr)
			continue;

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	return dev_space[nr_devs_want - 1];
}
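
/*
 * Recompute j->space[] and the size of the next journal entry we can open;
 * also advances each device's dirty indexes past buckets that no longer hold
 * dirty journal entries. Called with j->lock held whenever the set of dirty
 * journal entries changes.
 */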
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned clean, clean_ondisk, total;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < metadata_replicas_required(c)) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;
		prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
			   "rw journal devs:", nr_online, metadata_replicas_required(c));

		rcu_read_lock();
		for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
			prt_printf(&buf, " %s", ca->name);
		rcu_read_unlock();

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (unsigned i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_may_skip_flush, &j->flags);
	else
		clear_bit(JOURNAL_may_skip_flush, &j->flags);

	bch2_journal_set_watermark(j);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;

	if (!ret)
		journal_wake(j);
}
/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}
/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	mutex_lock(&j->discard_lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

void bch2_journal_reclaim_fast(struct journal *j)
{
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       j->pin.front <= j->seq_ondisk &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		j->pin.front++;
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}
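
/*
 * Drop a reference on journal sequence number @seq; returns true if it was
 * the last reference, in which case the caller is responsible for running
 * bch2_journal_reclaim_fast() (under j->lock) to actually free up the space.
 */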
bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	if (__bch2_journal_pin_put(j, seq)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}
static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}
static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1)
		return JOURNAL_PIN_btree;
	else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_key_cache;
	else
		return JOURNAL_PIN_other;
}
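
/*
 * Pins are segregated by type (btree node writes, key cache flushes,
 * everything else) so that reclaim can choose which kinds of work to do;
 * the type is inferred from the flush callback, as above.
 */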
static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn,
			  enum journal_pin_type type)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	/*
	 * flush_fn is how we identify journal pins in debugfs, so must always
	 * exist, even if it doesn't do anything:
	 */
	BUG_ON(!flush_fn);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;
	list_add(&pin->list, &pin_list->list[type]);
}
void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	u64 seq = READ_ONCE(src->seq);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	bool reclaim = __journal_pin_drop(j, dst);

	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);
	spin_unlock(&j->lock);
}
void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	BUG_ON(seq < journal_last_seq(j));

	bool reclaim = __journal_pin_drop(j, pin);

	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);

	spin_unlock(&j->lock);
}
/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j:		journal object
 * @pin:	pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}
/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;
	unsigned i;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (i = 0; i < JOURNAL_PIN_NR; i++)
			if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    ((1U << i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->list[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}
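
/*
 * Walk the pin fifo in order, calling each pin's flush callback to write out
 * whatever is holding that journal entry open (btree nodes, key cache
 * entries). @allowed_below_seq/@allowed_above_seq are bitmasks of
 * journal_pin_type selecting which pin classes may be flushed below and
 * above @seq_to_flush.
 */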
/* Returns the number of journal pins flushed: */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= 1U << JOURNAL_PIN_key_cache;
			allowed_below |= 1U << JOURNAL_PIN_key_cache;
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}
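
/*
 * Compute the journal sequence number reclaim needs to flush up to in order
 * to meet its targets: each device's journal at most half full, and the pin
 * fifo at most half used.
 */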
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 seq_to_flush = 0;

	spin_lock(&j->lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}
/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j:		journal object
 * @direct:	direct or background reclaim?
 * @kicked:	requested to run since we last ran?
 * Returns:	0 on success, or -EIO if the journal has been shutdown
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_cache *bc = &c->btree_cache;
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than j->reclaim_delay_ms since we last flushed,
		 * make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->watermark != BCH_WATERMARK_stripe)
			min_nr = 1;

		size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
		if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				atomic_long_read(&bc->nr_dirty), btree_cache_live,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}
int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}
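
/*
 * Background reclaim thread: runs reclaim periodically (every
 * journal_reclaim_delay ms) or when kicked, sleeping in between; sleeps
 * indefinitely while the pin fifo is empty.
 */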
static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			if (journal_empty)
				schedule();
			else if (time_after(j->next_reclaim, jiffies))
				schedule_timeout(j->next_reclaim - jiffies);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}
void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}
int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating journal reclaim thread");
	if (ret)
		return ret;

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	if (journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_key_cache)|
			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
	    journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
		*did_work = true;

	if (seq_to_flush > journal_cur_seq(j))
		bch2_journal_entry_close(j);

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}
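
/*
 * Flush all journal pins with sequence numbers <= @seq_to_flush, blocking
 * until reclaim catches up; returns true if we did any work. Note that key
 * cache and "other" pins are flushed before btree node pins - flushing the
 * key cache writes those keys back into the btree, which redirties btree
 * nodes.
 */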
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	/* time_stats this */
	bool did_work = false;

	if (!test_bit(JOURNAL_running, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
		journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}
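
/*
 * Flush every journal pin referencing device @dev_idx so the device can be
 * removed (or, with dev_idx < 0, every pin held by fewer than
 * metadata_replicas devices), then rewrite the journal's replicas entries to
 * match what the remaining pinned entries actually reference.
 */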
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal might
	 * be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	seq = 0;
	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;

		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		if (replicas.e.nr_devs) {
			spin_unlock(&j->lock);
			ret = bch2_mark_replicas(c, &replicas.e);
			spin_lock(&j->lock);
		}
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}