/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <trace/events/jbd2.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

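/*
 * Note: the handler above runs as a b_end_io completion callback, normally
 * in interrupt context, so it only records the IO status and unlocks the
 * buffer; anyone blocked in wait_on_buffer() on this bh is woken by the
 * unlock.
 */
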
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	int barrier_done = 0;
	struct timespec now = current_kernel_time();

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0]	= cpu_to_be32(crc32_sum);
	}
	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = submit_bh(WRITE_SYNC_PLUG, bh);
	if (barrier_done)
		clear_buffer_ordered(bh);

	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		printk(KERN_WARNING
		       "JBD: barrier-based sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		lock_buffer(bh);
		set_buffer_uptodate(bh);
		clear_buffer_dirty(bh);
		ret = submit_bh(WRITE_SYNC_PLUG, bh);
	}
	*cbh = bh;
	return ret;
}

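/*
 * The -EOPNOTSUPP retry above deliberately clears JBD2_BARRIER for the
 * whole journal: once the device has rejected one barrier request there
 * is no point issuing further ones, so the commit record is resubmitted
 * as a plain write and later commits skip barriers entirely.
 */
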
/*
 * This function along with journal_submit_commit_record
 * allows to write the commit record asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

retry:
	clear_buffer_dirty(bh);
	wait_on_buffer(bh);
	if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
		printk(KERN_WARNING
		       "JBD2: wait_on_commit_record: sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		lock_buffer(bh);
		clear_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		bh->b_end_io = journal_end_buffer_io_sync;

		ret = submit_bh(WRITE_SYNC_PLUG, bh);
		if (ret) {
			unlock_buffer(bh);
			return ret;
		}
		goto retry;
	}

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}

/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * submit the inode data buffers. We use writepage
		 * instead of writepages. Because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * wait_on_page_writeback_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}

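/*
 * The helper above folds one journal block into the running transaction
 * checksum.  The caller seeds crc32_sum with ~0 and feeds every block of
 * the transaction through crc32_be(); the final value lands in the commit
 * header (h_chksum[0]) when JBD2_FEATURE_COMPAT_CHECKSUM is set, which is
 * what lets recovery detect a partially written transaction.
 */
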
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

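/*
 * write_tag_block() splits a 64-bit block number across the on-disk tag:
 * the low 32 bits go in t_blocknr, and for 64-bit tags the high bits go
 * in t_blocknr_high.  "(block >> 31) >> 1" is equivalent to "block >> 32"
 * but stays well-defined even if the operand were ever only 32 bits wide.
 */
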
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	int write_op = WRITE;
	int to_free = 0;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	/*
	 * Use plugged writes here, since we want to submit several before
	 * we unplug the device. We don't do explicit unplugging in here,
	 * instead we rely on sync_buffer() doing the unplug for us.
	 */
	if (commit_transaction->t_synchronous_commit)
		write_op = WRITE_SYNC_PLUG;
	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.u.run.rs_wait = commit_transaction->t_max_wait;
	stats.u.run.rs_locked = jiffies;
	stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
						stats.u.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

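	/*
	 * The wait above follows the usual prepare_to_wait()/finish_wait()
	 * pattern: both locks are dropped before schedule() so that handle
	 * completion (which decrements t_updates under t_handle_lock and
	 * wakes j_wait_updates) can make progress, and t_updates is
	 * re-checked after prepare_to_wait() to avoid missing a wakeup.
	 */
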
	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.u.run.rs_flushing = jiffies;
	stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
					       stats.u.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  write_op);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.u.run.rs_logging = jiffies;
	stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
						 stats.u.run.rs_logging);
	stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
	stats.u.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	err = 0;
	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

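		/*
		 * At this point the descriptor block layout is: a
		 * journal_header_t, then one journal_block_tag_t per logged
		 * buffer, with the 16-byte journal UUID inserted after the
		 * first tag (later tags carry JBD2_FLAG_SAME_UUID instead).
		 * The final tag gets JBD2_FLAG_LAST_TAG before the IO is
		 * submitted below.
		 */
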
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(write_op, bh);
			}
			cond_resched();
			stats.u.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Done it all: now write the commit record asynchronously. */

	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

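	/*
	 * With JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT the commit block is
	 * submitted above without waiting for the metadata IO to finish;
	 * the transaction checksum carried in the commit header is what
	 * lets recovery reject a commit block that raced ahead of its
	 * descriptor and metadata blocks.
	 */
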
	/*
	 * This is the right place to wait for data buffers both for ASYNC
	 * and !ASYNC commit. If commit is ASYNC, we need to wait only after
	 * the commit block went to disk (which happens above). If commit is
	 * SYNC, we need to wait for data buffers before we start writing
	 * commit block, which happens below in such setting.
	 */
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

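	/*
	 * The loop above unwinds the BJ_IO/BJ_Shadow pairing: each logged
	 * buffer was written to the journal via a temporary BJ_IO
	 * buffer_head while the real one sat on the shadow list.  The dummy
	 * bh is freed, the real bh moves to BJ_Forget for checkpointing,
	 * and anyone sleeping on BH_Unshadow is woken.
	 */
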
	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 5\n");

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(journal, cbh);

	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
						commit_transaction->t_start);

	/*
	 * File the transaction for history
	 */
	stats.ts_type = JBD2_STATS_RUN;
	stats.ts_tid = commit_transaction->t_tid;
	stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
	spin_lock(&journal->j_history_lock);
	memcpy(journal->j_history + journal->j_history_cur, &stats,
			sizeof(stats));
	if (++journal->j_history_cur == journal->j_history_max)
		journal->j_history_cur = 0;

	/*
	 * Calculate overall stats
	 */
	journal->j_stats.ts_tid++;
	journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
	journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
	journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
	journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
	journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
	journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
	journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
	journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
	spin_unlock(&journal->j_state_lock);

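	/*
	 * The update above is an exponentially weighted moving average:
	 * new j_average_commit_time = (commit_time + 3 * old average) / 4,
	 * so a single unusual commit only shifts the average by a quarter
	 * of its deviation.
	 */
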
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}