// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}
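/*
 * Note: the clear_bit_unlock() + smp_mb__after_atomic() + wake_up_bit()
 * sequence above is the release side of the shadow-buffer handshake: the
 * waiter (do_get_write_access() in fs/jbd2/transaction.c) sleeps on
 * BH_Shadow of the original buffer until commit IO on its frozen copy has
 * completed.
 */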
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}
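/*
 * The commit block checksum covers the whole journal block and is computed
 * with h_chksum[0] zeroed out first, so the stored value never feeds back
 * into its own computation.
 */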
/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0]	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE,
				REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

	*cbh = bh;
	return ret;
}
/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */

	return ret;
}
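/*
 * With JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT the commit block is submitted
 * before the rest of the journal IO has completed and recovery relies on the
 * commit-block checksum rather than on write ordering; the committing thread
 * only waits for it here, after all other journal writes have finished.
 */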
/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping,
		loff_t dirty_start, loff_t dirty_end)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = dirty_start,
		.range_end = dirty_end,
	};

	ret = generic_writepages(mapping, &wbc);

	return ret;
}
/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * submit the inode data buffers. We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping, dirty_start,
				dirty_end);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
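/*
 * JI_COMMIT_RUNNING is what keeps the jbd2_inode alive while j_list_lock is
 * dropped for the actual writeback above and in
 * journal_finish_inode_data_buffers() below: jbd2_journal_release_jbd_inode()
 * waits on __JI_COMMIT_RUNNING before freeing the structure, which is why the
 * flag is cleared with an explicit smp_mb() + wake_up_bit().
 */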
/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait_range_keep_errors(
				jinode->i_vfs_inode->i_mapping, dirty_start,
				dirty_end);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
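/*
 * The on-disk tag stores the low 32 bits of the block number in t_blocknr;
 * only when the 64bit feature is set are the high bits written to
 * t_blocknr_high.  The two-step shift ((block >> 31) >> 1) is, presumably, a
 * conventional way to express ">> 32" that stays warning-free even if the
 * value is ever only 32 bits wide.
 */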
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
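/*
 * The per-block tag checksum chains the transaction sequence number and then
 * the block payload into one value, so a stale block left over from an older,
 * reused transaction cannot masquerade as part of this one during recovery.
 * With csum v2 only the low 16 bits fit in the tag; csum v3 stores the full
 * 32-bit value.
 */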
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);
	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}
	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;
	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
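	/*
	 * The open-coded wait above must drop both t_handle_lock and
	 * j_state_lock before schedule() so that tasks holding handles can
	 * make progress and call jbd2_journal_stop(), which wakes
	 * j_wait_updates once t_updates drops to zero.
	 */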
	J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);
	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear the revoked flag to reflect that there are no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);
	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");
	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));
	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}
		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}
		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);
		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							       descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
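	/*
	 * At this point every metadata buffer of the transaction has been
	 * copied (or escaped) into a temporary journal buffer and submitted,
	 * together with its descriptor block(s); when the compat checksum
	 * feature is enabled, crc32_sum now covers all of them and will be
	 * sealed into the commit block below.
	 */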
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}
	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);
	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record.
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);
	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");
	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);
	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);
	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);
	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}
		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}
		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * If a buffer was freed while still being journaled by a
		 * previous transaction, it gets refiled to BJ_Forget of the
		 * running transaction. If the just committed transaction
		 * contains an "add to orphan" operation, we can completely
		 * invalidate the buffer now. We are rather thorough here,
		 * since the buffer may still be accessible when blocksize <
		 * pagesize and it is attached to the last partial page.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so it is enough to clear buffer_jbddirty and
			 * buffer_freed bits. For the file mapping buffers (i.e.
			 * journalled data) we need to unmap buffer and clear
			 * more bits. We also need to be careful about the check
			 * because the data page mapping can get cleared under
			 * our hands. Note that if mapping == NULL, we don't
			 * need to make buffer unmapped because the page is
			 * already detached from the mapping and buffers cannot
			 * get reused.
			 */
			mapping = READ_ONCE(bh->b_page->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}
		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}
	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
			commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);
	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the existing average higher than this commit time so we
	 * don't react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
	write_unlock(&journal->j_state_lock);
	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);
	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}