// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/checkpoint.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1999 Red Hat Software --- All Rights Reserved
 *
 * Checkpoint routines for the generic filesystem journaling code.
 * Part of the ext2fs journaling system.
 *
 * Checkpointing is the process of ensuring that a section of the log is
 * committed fully to disk, so that that portion of the log can be
 * reused.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>
/*
 * Unlink a buffer from a transaction checkpoint list.
 *
 * Called with j_list_lock held.
 */
static inline void __buffer_unlink(struct journal_head *jh)
{
	transaction_t *transaction = jh->b_cp_transaction;

	jh->b_cpnext->b_cpprev = jh->b_cpprev;
	jh->b_cpprev->b_cpnext = jh->b_cpnext;
	if (transaction->t_checkpoint_list == jh) {
		transaction->t_checkpoint_list = jh->b_cpnext;
		if (transaction->t_checkpoint_list == jh)
			transaction->t_checkpoint_list = NULL;
	}
}
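
/*
 * Note: t_checkpoint_list is a circular, doubly linked list threaded
 * through b_cpnext/b_cpprev.  A list holding a single journal_head points
 * back at itself, which is why the head is reloaded and compared against
 * jh again above before it is finally cleared.
 */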
/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*.  It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
__acquires(&journal->j_state_lock)
__releases(&journal->j_state_lock)
{
	int nblocks, space_left;
	/* assert_spin_locked(&journal->j_state_lock); */

	nblocks = journal->j_max_transaction_buffers;
	while (jbd2_log_space_left(journal) < nblocks) {
		write_unlock(&journal->j_state_lock);
		mutex_lock_io(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete.  If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		write_lock(&journal->j_state_lock);
		if (journal->j_flags & JBD2_ABORT) {
			mutex_unlock(&journal->j_checkpoint_mutex);
			return;
		}
		spin_lock(&journal->j_list_lock);
		space_left = jbd2_log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;
			bool has_transaction = false;

			if (journal->j_committing_transaction) {
				tid = journal->j_committing_transaction->t_tid;
				has_transaction = true;
			}
			spin_unlock(&journal->j_list_lock);
			write_unlock(&journal->j_state_lock);
			if (chkpt) {
				jbd2_log_do_checkpoint(journal);
			} else if (jbd2_cleanup_journal_tail(journal) <= 0) {
				/*
				 * We were able to recover space or the
				 * journal was aborted due to an error.
				 */
			} else if (has_transaction) {
				/*
				 * jbd2_journal_commit_transaction() may want
				 * to take the checkpoint_mutex if JBD2_FLUSHED
				 * is set.  So we need to temporarily drop it.
				 */
				mutex_unlock(&journal->j_checkpoint_mutex);
				jbd2_log_wait_commit(journal, tid);
				write_lock(&journal->j_state_lock);
				continue;
			} else {
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space in %s\n", __func__,
				       journal->j_devname);
				WARN_ON(1);
				jbd2_journal_abort(journal, -EIO);
			}
			write_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
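
/*
 * Progress in the loop above comes from one of three places: running a
 * checkpoint, advancing the log tail, or waiting for the committing
 * transaction to finish.  j_checkpoint_mutex serializes concurrent
 * checkpointers, while j_state_lock is dropped and retaken around each
 * attempt so that the commit path can make progress in the meantime.
 */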
static void
__flush_batch(journal_t *journal, int *batch_count)
{
	int i;
	struct blk_plug plug;

	blk_start_plug(&plug);
	for (i = 0; i < *batch_count; i++)
		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
	blk_finish_plug(&plug);

	for (i = 0; i < *batch_count; i++) {
		struct buffer_head *bh = journal->j_chkpt_bhs[i];

		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
		journal->j_chkpt_bhs[i] = NULL;
	}
	*batch_count = 0;
}
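
/*
 * __flush_batch() submits the queued buffers inside a block plug so the
 * block layer can merge them into larger requests, then drops the buffer
 * references taken when they were queued and resets the batch counter so
 * the j_chkpt_bhs[] array can be reused.
 */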
/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 * Called with j_checkpoint_mutex held.
 */
int jbd2_log_do_checkpoint(journal_t *journal)
{
	struct journal_head	*jh;
	struct buffer_head	*bh;
	transaction_t		*transaction;
	tid_t			this_tid;
	int			result, batch_count = 0;

	jbd2_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = jbd2_cleanup_journal_tail(journal);
	trace_jbd2_checkpoint(journal, result);
	jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;
	/*
	 * OK, we need to start writing disk blocks.  Take one transaction
	 * and write it.
	 */
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions)
		goto out;
	transaction = journal->j_checkpoint_transactions;
	if (transaction->t_chp_stats.cs_chp_time == 0)
		transaction->t_chp_stats.cs_chp_time = jiffies;
	this_tid = transaction->t_tid;
restart:
	/*
	 * If someone cleaned up this transaction while we slept, we're
	 * done (maybe it's a new transaction, but it fell at the same
	 * address).
	 */
	if (journal->j_checkpoint_transactions != transaction ||
	    transaction->t_tid != this_tid)
		goto out;
	/* checkpoint all of the transaction's buffers */
	while (transaction->t_checkpoint_list) {
		jh = transaction->t_checkpoint_list;
		bh = jh2bh(jh);

		if (jh->b_transaction != NULL) {
			transaction_t *t = jh->b_transaction;
			tid_t tid = t->t_tid;

			transaction->t_chp_stats.cs_forced_to_close++;
			spin_unlock(&journal->j_list_lock);
			if (unlikely(journal->j_flags & JBD2_UNMOUNT))
				/*
				 * The journal thread is dead; so
				 * starting and waiting for a commit
				 * to finish will cause us to wait for
				 * a _very_ long time.
				 */
				printk(KERN_ERR
				       "JBD2: %s: Waiting for Godot: block %llu\n",
				       journal->j_devname,
				       (unsigned long long) bh->b_blocknr);

			if (batch_count)
				__flush_batch(journal, &batch_count);
			jbd2_log_start_commit(journal, tid);
			/*
			 * jbd2_journal_commit_transaction() may want
			 * to take the checkpoint_mutex if JBD2_FLUSHED
			 * is set, jbd2_update_log_tail() called by
			 * jbd2_journal_commit_transaction() may also take
			 * checkpoint_mutex.  So we need to temporarily
			 * drop it.
			 */
			mutex_unlock(&journal->j_checkpoint_mutex);
			jbd2_log_wait_commit(journal, tid);
			mutex_lock_io(&journal->j_checkpoint_mutex);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		if (!trylock_buffer(bh)) {
			/*
			 * The buffer is locked, it may be writing back, or
			 * flushing out in the last couple of cycles, or
			 * re-adding into a new transaction, need to check
			 * it again until it's unlocked.
			 */
			get_bh(bh);
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto retry;
		} else if (!buffer_dirty(bh)) {
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "remove from checkpoint");
			/*
			 * If the transaction was released or the checkpoint
			 * list was empty, we're done.
			 */
			if (__jbd2_journal_remove_checkpoint(jh) ||
			    !transaction->t_checkpoint_list)
				goto out;
		} else {
			unlock_buffer(bh);
			/*
			 * We are about to write the buffer, it could be
			 * raced by some other transaction shrink or buffer
			 * re-log logic once we release the j_list_lock,
			 * leave it on the checkpoint list and check status
			 * again to make sure it's clean.
			 */
			BUFFER_TRACE(bh, "queue");
			get_bh(bh);
			J_ASSERT_BH(bh, !buffer_jwrite(bh));
			journal->j_chkpt_bhs[batch_count++] = bh;
			transaction->t_chp_stats.cs_written++;
			transaction->t_checkpoint_list = jh->b_cpnext;
		}

		if ((batch_count == JBD2_NR_BATCH) ||
		    need_resched() || spin_needbreak(&journal->j_list_lock) ||
		    jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
			goto unlock_and_flush;
	}

	if (batch_count) {
		unlock_and_flush:
			spin_unlock(&journal->j_list_lock);
		retry:
			if (batch_count)
				__flush_batch(journal, &batch_count);
			spin_lock(&journal->j_list_lock);
			goto restart;
	}
out:
	spin_unlock(&journal->j_list_lock);
	result = jbd2_cleanup_journal_tail(journal);

	return (result < 0) ? result : 0;
}
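
/*
 * Summary of the per-buffer cases handled above: a buffer still attached to
 * a running or committing transaction forces that transaction to commit and
 * the scan to restart; a locked buffer is waited on and retried; a clean
 * buffer is simply dropped from the checkpoint list; and a dirty,
 * unattached buffer is queued in j_chkpt_bhs[] and written out in batches
 * via __flush_batch().
 */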
/*
 * Check the list of checkpoint transactions for the journal to see if
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock.  If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * Called with the journal lock held.
 *
 * This is the only part of the journaling code which really needs to be
 * aware of transaction aborts.  Checkpointing involves writing to the
 * main filesystem area rather than to the journal, so it can proceed
 * even in abort state, but we must not update the super block if
 * checkpointing may have failed.  Otherwise, we would lose some metadata
 * buffers which should be written-back to the filesystem.
 */

int jbd2_cleanup_journal_tail(journal_t *journal)
{
	tid_t		first_tid;
	unsigned long	blocknr;

	if (is_journal_aborted(journal))
		return -EIO;

	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
		return 1;
	J_ASSERT(blocknr != 0);

	/*
	 * We need to make sure that any blocks that were recently written out
	 * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before
	 * we drop the transactions from the journal. It's unlikely this will
	 * be necessary, especially with an appropriately sized journal, but we
	 * need this to guarantee correctness.  Fortunately
	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
	 */
	if (journal->j_flags & JBD2_BARRIER)
		blkdev_issue_flush(journal->j_fs_dev);

	return __jbd2_update_log_tail(journal, first_tid, blocknr);
}
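
/*
 * Note the interplay with jbd2_log_do_checkpoint() above: a return value of
 * 1 ("nothing to clean up") tells the checkpointer it still has to write
 * checkpoint buffers itself, while 0 (tail advanced) or a negative error
 * means no further checkpoint writing is useful right now.
 */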
/* Checkpoint list management */

/*
 * journal_shrink_one_cp_list
 *
 * Find all the written-back checkpoint buffers in the given list
 * and try to release them. If the whole transaction is released, set
 * the 'released' parameter. Return the number of released checkpointed
 * buffers.
 *
 * Called with j_list_lock held.
 */
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
						enum jbd2_shrink_type type,
						bool *released)
{
	struct journal_head *last_jh;
	struct journal_head *next_jh = jh;
	unsigned long nr_freed = 0;
	int ret;

	*released = false;
	if (!jh)
		return 0;

	last_jh = jh->b_cpprev;
	do {
		jh = next_jh;
		next_jh = jh->b_cpnext;

		if (type == JBD2_SHRINK_DESTROY) {
			ret = __jbd2_journal_remove_checkpoint(jh);
		} else {
			ret = jbd2_journal_try_remove_checkpoint(jh);
			if (ret < 0) {
				if (type == JBD2_SHRINK_BUSY_SKIP)
					continue;
				break;
			}
		}

		nr_freed++;
		if (ret) {
			*released = true;
			break;
		}

		if (need_resched())
			break;
	} while (jh != last_jh);

	return nr_freed;
}
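
/*
 * The 'type' argument selects how aggressive the walk above is:
 * JBD2_SHRINK_DESTROY removes every buffer unconditionally,
 * JBD2_SHRINK_BUSY_SKIP skips busy buffers and keeps scanning, and
 * JBD2_SHRINK_BUSY_STOP gives up at the first busy buffer it meets.
 */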
/*
 * jbd2_journal_shrink_checkpoint_list
 *
 * Find 'nr_to_scan' written-back checkpoint buffers in the journal
 * and try to release them. Return the number of released checkpointed
 * buffers.
 *
 * Called with j_list_lock held.
 */
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
						  unsigned long *nr_to_scan)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	bool __maybe_unused released;
	tid_t first_tid = 0, last_tid = 0, next_tid = 0;
	tid_t tid = 0;
	unsigned long nr_freed = 0;
	unsigned long freed;
	bool first_set = false;

again:
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions) {
		spin_unlock(&journal->j_list_lock);
		goto out;
	}

	/*
	 * Get next shrink transaction, resume previous scan or start
	 * over again. If some others do checkpoint and drop transaction
	 * from the checkpoint list, we ignore saved j_shrink_transaction
	 * and start over unconditionally.
	 */
	if (journal->j_shrink_transaction)
		transaction = journal->j_shrink_transaction;
	else
		transaction = journal->j_checkpoint_transactions;

	if (!first_set) {
		first_tid = transaction->t_tid;
		first_set = true;
	}
	last_transaction = journal->j_checkpoint_transactions->t_cpprev;
	next_transaction = transaction;
	last_tid = last_transaction->t_tid;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		tid = transaction->t_tid;

		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
						   JBD2_SHRINK_BUSY_SKIP, &released);
		nr_freed += freed;
		(*nr_to_scan) -= min(*nr_to_scan, freed);
		if (*nr_to_scan == 0)
			break;
		if (need_resched() || spin_needbreak(&journal->j_list_lock))
			break;
	} while (transaction != last_transaction);

	if (transaction != last_transaction) {
		journal->j_shrink_transaction = next_transaction;
		next_tid = next_transaction->t_tid;
	} else {
		journal->j_shrink_transaction = NULL;
		next_tid = 0;
	}

	spin_unlock(&journal->j_list_lock);
	cond_resched();

	if (*nr_to_scan && journal->j_shrink_transaction)
		goto again;
out:
	trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
					  nr_freed, next_tid);

	return nr_freed;
}
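
/*
 * j_shrink_transaction acts as a resumable cursor: when the scan above stops
 * early (scan budget exhausted, need_resched() or lock contention), the next
 * call picks up from the remembered transaction instead of rescanning from
 * the head of the checkpoint list.
 */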
/*
 * journal_clean_checkpoint_list
 *
 * Find all the written-back checkpoint buffers in the journal and release them.
 * If 'type' is JBD2_SHRINK_DESTROY, release all buffers unconditionally. If
 * 'type' is JBD2_SHRINK_BUSY_STOP, stop releasing buffers as soon as a busy
 * buffer is encountered. To avoid wasting CPU cycles scanning the buffer list
 * in some cases, don't pass JBD2_SHRINK_BUSY_SKIP as 'type' to this function.
 *
 * Called with j_list_lock held.
 */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal,
					  enum jbd2_shrink_type type)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	bool released;

	WARN_ON_ONCE(type == JBD2_SHRINK_BUSY_SKIP);

	transaction = journal->j_checkpoint_transactions;
	if (!transaction)
		return;

	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
					   type, &released);
		/*
		 * This function only frees up some memory if possible so we
		 * don't have an obligation to finish processing. Bail out if
		 * preemption requested:
		 */
		if (need_resched())
			return;
		/*
		 * Stop scanning if we couldn't free the transaction. This
		 * avoids pointless scanning of transactions which still
		 * weren't checkpointed.
		 */
		if (!released)
			return;
	} while (transaction != last_transaction);
}
/*
 * Remove buffers from all checkpoint lists as journal is aborted and we just
 * need to free memory.
 */
void jbd2_journal_destroy_checkpoint(journal_t *journal)
{
	/*
	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
	 * early due to a need of rescheduling.
	 */
	while (1) {
		spin_lock(&journal->j_list_lock);
		if (!journal->j_checkpoint_transactions) {
			spin_unlock(&journal->j_list_lock);
			break;
		}
		__jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_DESTROY);
		spin_unlock(&journal->j_list_lock);
		cond_resched();
	}
}
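
/*
 * The loop above re-takes j_list_lock on every pass because
 * __jbd2_journal_clean_checkpoint_list() may return early when rescheduling
 * is needed; the cond_resched() between passes keeps teardown of a long
 * checkpoint list from monopolizing the CPU.
 */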
/*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being write-back flushed to disk, or being
 * committed to the log).
 *
 * We cannot safely clean a transaction out of the log until all of the
 * buffer updates committed in that transaction have safely been stored
 * elsewhere on disk.  To achieve this, all of the buffers in a
 * transaction need to be maintained on the transaction's checkpoint
 * lists until they have been rewritten, at which point this function is
 * called to remove the buffer from the existing transaction's
 * checkpoint lists.
 *
 * The function returns 1 if it frees the transaction, 0 otherwise.
 * The function can free jh and bh.
 *
 * This function is called with j_list_lock held.
 */
int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
	struct transaction_chp_stats_s *stats;
	transaction_t *transaction;
	journal_t *journal;

	JBUFFER_TRACE(jh, "entry");

	transaction = jh->b_cp_transaction;
	if (!transaction) {
		JBUFFER_TRACE(jh, "not on transaction");
		return 0;
	}
	journal = transaction->t_journal;

	JBUFFER_TRACE(jh, "removing from transaction");

	__buffer_unlink(jh);
	jh->b_cp_transaction = NULL;
	percpu_counter_dec(&journal->j_checkpoint_jh_count);
	jbd2_journal_put_journal_head(jh);

	/* Is this transaction empty? */
	if (transaction->t_checkpoint_list)
		return 0;

	/*
	 * There is one special case to worry about: if we have just pulled the
	 * buffer off a running or committing transaction's checkpoint list,
	 * then even if the checkpoint list is empty, the transaction obviously
	 * cannot be dropped!
	 *
	 * The locking here around t_state is a bit sleazy.
	 * See the comment at the end of jbd2_journal_commit_transaction().
	 */
	if (transaction->t_state != T_FINISHED)
		return 0;

	/*
	 * OK, that was the last buffer for the transaction, we can now
	 * safely remove this transaction from the log.
	 */
	stats = &transaction->t_chp_stats;
	if (stats->cs_chp_time)
		stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
						    jiffies);
	trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
				    transaction->t_tid, stats);

	__jbd2_journal_drop_transaction(journal, transaction);
	jbd2_journal_free_transaction(transaction);
	return 1;
}
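
/*
 * Reference counting recap: the journal_head reference taken by
 * __jbd2_journal_insert_checkpoint() is dropped here via
 * jbd2_journal_put_journal_head(), and j_checkpoint_jh_count tracks how many
 * buffers currently sit on checkpoint lists.
 */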
/*
 * Check the checkpoint buffer and try to remove it from the checkpoint
 * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
 * it frees the transaction, 0 otherwise.
 *
 * This function is called with j_list_lock held.
 */
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_transaction)
		return -EBUSY;
	if (!trylock_buffer(bh))
		return -EBUSY;
	if (buffer_dirty(bh)) {
		unlock_buffer(bh);
		return -EBUSY;
	}
	unlock_buffer(bh);

	/*
	 * Buffer is clean and the IO has finished (we held the buffer
	 * lock) so the checkpoint is done. We can safely remove the
	 * buffer from this transaction.
	 */
	JBUFFER_TRACE(jh, "remove from checkpoint list");
	return __jbd2_journal_remove_checkpoint(jh);
}
/*
 * journal_insert_checkpoint: put a committed buffer onto a checkpoint
 * list so that we know when it is safe to clean the transaction out of
 * the log.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
				      transaction_t *transaction)
{
	JBUFFER_TRACE(jh, "entry");
	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);

	/* Get reference for checkpointing transaction */
	jbd2_journal_grab_journal_head(jh2bh(jh));
	jh->b_cp_transaction = transaction;

	if (!transaction->t_checkpoint_list) {
		jh->b_cpnext = jh->b_cpprev = jh;
	} else {
		jh->b_cpnext = transaction->t_checkpoint_list;
		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
		jh->b_cpprev->b_cpnext = jh;
		jh->b_cpnext->b_cpprev = jh;
	}
	transaction->t_checkpoint_list = jh;
	percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count);
}
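
/*
 * The newly inserted journal_head becomes the head of t_checkpoint_list, so
 * it is the first buffer jbd2_log_do_checkpoint() will look at.  Every
 * insertion must be balanced by __jbd2_journal_remove_checkpoint(), which
 * drops the reference and the counter taken here.
 */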
/*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */

void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
	assert_spin_locked(&journal->j_list_lock);

	journal->j_shrink_transaction = NULL;
	if (transaction->t_cpnext) {
		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions =
				transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions = NULL;
	}

	J_ASSERT(transaction->t_state == T_FINISHED);
	J_ASSERT(transaction->t_buffers == NULL);
	J_ASSERT(transaction->t_forget == NULL);
	J_ASSERT(transaction->t_shadow_list == NULL);
	J_ASSERT(transaction->t_checkpoint_list == NULL);
	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
	J_ASSERT(journal->j_committing_transaction != transaction);
	J_ASSERT(journal->j_running_transaction != transaction);

	trace_jbd2_drop_transaction(journal, transaction);

	jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}