/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *      The journal MUST be locked.  We don't perform atomic mallocs on the
 *      new transaction and we can't block without protecting against other
 *      processes trying to touch the journal while it is in transition.
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires =
                        round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;

        return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle)
{
        transaction_t *transaction;
        int needed;
        int nblocks = handle->h_buffer_credits;
        transaction_t *new_transaction = NULL;
        int ret = 0;
        unsigned long ts = jiffies;

        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
                       current->comm, nblocks,
                       journal->j_max_transaction_buffers);
                ret = -ENOSPC;
                goto out;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kzalloc(sizeof(*new_transaction),
                                                GFP_NOFS|__GFP_NOFAIL);
                if (!new_transaction) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

repeat:

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
        spin_lock(&journal->j_state_lock);
repeat_locked:
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                spin_unlock(&journal->j_state_lock);
                ret = -EROFS;
                goto out;
        }

        /* Wait on the journal's transaction barrier if necessary */
        if (journal->j_barrier_count) {
                spin_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                if (!new_transaction) {
                        spin_unlock(&journal->j_state_lock);
                        goto alloc_transaction;
                }
                jbd2_get_transaction(journal, new_transaction);
                new_transaction = NULL;
        }

        transaction = journal->j_running_transaction;

        /*
         * If the current transaction is locked down for commit, wait for the
         * lock to be released.
         */
        if (transaction->t_state == T_LOCKED) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_transaction_locked,
                                        &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * If there is not enough space left in the log to write all potential
         * buffers requested by this operation, we need to stall pending a log
         * checkpoint to free some more log space.
         */
        spin_lock(&transaction->t_handle_lock);
        needed = transaction->t_outstanding_credits + nblocks;

        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large, then start
                 * to commit it: we can then go back and attach this handle to
                 * a new transaction.
                 */
                DEFINE_WAIT(wait);

                jbd_debug(2, "Handle %p starting new commit...\n", handle);
                spin_unlock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
                __jbd2_log_start_commit(journal, transaction->t_tid);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         *
         * The worst part is, any transaction currently committing can
         * reduce the free space arbitrarily.  Be careful to account for
         * those buffers when checkpointing.
         */

        /*
         * @@@ AKPM: This seems rather over-defensive.  We're giving commit
         * a _lot_ of headroom: 1/4 of the journal plus the size of
         * the committing transaction.  Really, we only need to give it
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
         * Also, this test is inconsistent with the matching one in
         * jbd2_journal_extend().
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                spin_unlock(&transaction->t_handle_lock);
                __jbd2_log_wait_for_space(journal);
                goto repeat_locked;
        }

        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction. */

        if (time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
        }

        handle->h_transaction = transaction;
        transaction->t_outstanding_credits += nblocks;
        transaction->t_updates++;
        transaction->t_handle_count++;
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks, transaction->t_outstanding_credits,
                  __jbd2_log_space_left(journal));
        spin_unlock(&transaction->t_handle_lock);
        spin_unlock(&journal->j_state_lock);

        lock_map_acquire(&handle->h_lockdep_map);
out:
        if (unlikely(new_transaction))          /* It's usually NULL */
                kfree(new_transaction);
        return ret;
}

static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        memset(handle, 0, sizeof(*handle));
        handle->h_buffer_credits = nblocks;
        handle->h_ref = 1;

        lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
                                                &jbd2_handle_key, 0);

        return handle;
}

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffer we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);

        current->journal_info = handle;

        err = start_this_handle(journal, handle);
        if (err < 0) {
                jbd2_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
        }
        return handle;
}
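
/*
 * Typical handle lifecycle, for reference.  This is an illustrative
 * sketch of a caller (the surrounding filesystem code is hypothetical,
 * not part of this file); error handling is trimmed to the essentials:
 *
 *      handle_t *handle = jbd2_journal_start(journal, credits);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ... jbd2_journal_get_write_access(handle, bh) ...
 *      ... modify the buffer ...
 *      ... jbd2_journal_dirty_metadata(handle, bh) ...
 *      return jbd2_journal_stop(handle);
 *
 * Note that the handle is per-task (it lives in current->journal_info),
 * so a nested jbd2_journal_start() on the same journal just bumps h_ref
 * on the existing handle.
 */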

/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int result;
        int wanted;

        result = -EIO;
        if (is_handle_aborted(handle))
                goto out;

        result = 1;

        spin_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (handle->h_transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        spin_lock(&transaction->t_handle_lock);
        wanted = transaction->t_outstanding_credits + nblocks;

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                goto unlock;
        }

        if (wanted > __jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
                goto unlock;
        }

        handle->h_buffer_credits += nblocks;
        transaction->t_outstanding_credits += nblocks;
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        spin_unlock(&journal->j_state_lock);
out:
        return result;
}

/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        J_ASSERT(transaction->t_updates > 0);
        J_ASSERT(journal_current_handle() == handle);

        spin_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
        transaction->t_outstanding_credits -= handle->h_buffer_credits;
        transaction->t_updates--;

        if (!transaction->t_updates)
                wake_up(&journal->j_wait_updates);
        spin_unlock(&transaction->t_handle_lock);

        jbd_debug(2, "restarting handle %p\n", handle);
        __jbd2_log_start_commit(journal, transaction->t_tid);
        spin_unlock(&journal->j_state_lock);

        lock_map_release(&handle->h_lockdep_map);
        handle->h_buffer_credits = nblocks;
        ret = start_this_handle(journal, handle);
        return ret;
}
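
/*
 * The expected way to combine the two calls above, sketched as an
 * illustrative caller (not part of this file): try to extend the
 * current handle, and if the transaction cannot grant the credits,
 * fall back to restarting the handle against a fresh transaction.
 * Any state that depended on the old transaction must be revalidated
 * after a restart, since the old transaction may commit immediately:
 *
 *      if (jbd2_journal_extend(handle, nblocks) != 0) {
 *              err = jbd2_journal_restart(handle, nblocks);
 *              if (err)
 *                      return err;
 *              ... re-read/revalidate any metadata used so far ...
 *      }
 */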

/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        spin_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                if (!transaction->t_updates) {
                        spin_unlock(&transaction->t_handle_lock);
                        break;
                }
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                spin_unlock(&transaction->t_handle_lock);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                spin_lock(&journal->j_state_lock);
        }
        spin_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates() calls
         * to make sure that we serialise special journal-locked operations
         * too.
         */
        mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        spin_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        spin_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_transaction_locked);
}
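
/*
 * Barrier usage, as an illustrative sketch (the operation in the middle
 * is hypothetical): quiesce the journal around work that must not race
 * with running handles, e.g. a journal-wide flush:
 *
 *      jbd2_journal_lock_updates(journal);     blocks new handles and
 *                                              waits for t_updates == 0
 *      ... perform the journal-wide operation ...
 *      jbd2_journal_unlock_updates(journal);   drops j_barrier and wakes
 *                                              waiters on
 *                                              j_wait_transaction_locked
 */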

static void warn_dirty_buffer(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_WARNING
               "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        int need_copy = 0;

        if (is_handle_aborted(handle))
                return -EROFS;

        transaction = handle->h_transaction;
        journal = transaction->t_journal;

        jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        lock_buffer(bh);
        jbd_lock_bh_state(bh);

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.)  */

        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                        warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                set_buffer_jbddirty(bh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                jbd_unlock_bh_state(bh);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                jh->b_next_transaction = transaction;
                goto done;
        }

        /* Is there data here we need to preserve? */

        if (jh->b_transaction && jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "owned by older transaction");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);

                /* There is one case we have to be very careful about.
                 * If the committing transaction is currently writing
                 * this buffer out to disk and has NOT made a copy-out,
                 * then we cannot modify the buffer contents at all
                 * right now.  The essence of copy-out is that it is the
                 * extra copy, not the primary copy, which gets
                 * journaled.  If the primary copy is already going to
                 * disk then we cannot do copy-out here. */
                if (jh->b_jlist == BJ_Shadow) {
                        DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
                        wait_queue_head_t *wqh;

                        wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
                        /* commit wakes up all shadow buffers after IO */
                        for ( ; ; ) {
                                prepare_to_wait(wqh, &wait.wait,
                                                TASK_UNINTERRUPTIBLE);
                                if (jh->b_jlist != BJ_Shadow)
                                        break;
                                schedule();
                        }
                        finish_wait(wqh, &wait.wait);
                        goto repeat;
                }

                /* Only do the copy if the currently-owning transaction
                 * still needs it.  If it is on the Forget list, the
                 * committing transaction is past that stage.  The
                 * buffer had better remain locked during the kmalloc,
                 * but that should be true --- we hold the journal lock
                 * still and the buffer is already on the BUF_JOURNAL
                 * list so won't be flushed.
                 *
                 * Subtle point, though: if this is a get_undo_access,
                 * then we will be relying on the frozen_data to contain
                 * the new value of the committed_data record after the
                 * transaction, so we HAVE to force the frozen_data copy
                 * in that case. */

                if (jh->b_jlist != BJ_Forget || force_copy) {
                        JBUFFER_TRACE(jh, "generate frozen data");
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
                                frozen_buffer =
                                        jbd2_alloc(jh2bh(jh)->b_size,
                                                        GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
                                               __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
                                        goto done;
                                }
                                goto repeat;
                        }
                        jh->b_frozen_data = frozen_buffer;
                        frozen_buffer = NULL;
                        need_copy = 1;
                }
                jh->b_next_transaction = transaction;
        }

        /*
         * Finally, if the buffer is not journaled right now, we need to make
         * sure it doesn't get written to disk before the caller actually
         * commits the new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        }

done:
        if (need_copy) {
                struct page *page;
                int offset;
                char *source;

                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
                source = kmap_atomic(page, KM_USER0);
                /* Fire data frozen trigger just before we copy the data */
                jbd2_buffer_frozen_trigger(jh, source + offset,
                                           jh->b_frozen_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);

                /*
                 * Now that the frozen data is saved off, we need to store
                 * any matching triggers.
                 */
                jh->b_frozen_triggers = jh->b_triggers;
        }
        jbd_unlock_bh_state(bh);

        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * cancelled.
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 * @credits: variable that will receive credits for the buffer
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int rc;

        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}
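
/*
 * Illustrative call sequence for a metadata update (a sketch only; the
 * surrounding filesystem code is hypothetical).  Write access must be
 * requested *before* the buffer is modified, so that any copy-out of
 * the committing transaction's version happens first:
 *
 *      err = jbd2_journal_get_write_access(handle, bh);
 *      if (err)
 *              goto fail;
 *      ... modify bh->b_data ...
 *      err = jbd2_journal_dirty_metadata(handle, bh);
 */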

/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                jh->b_transaction = transaction;

                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second
         * revoke, which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
        jbd2_journal_put_journal_head(jh);
out:
        return err;
}

/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 * @credits: store the number of taken credits here (if not NULL)
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        char *committed_data = NULL;

        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data) {
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
                }
        }

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        jbd_unlock_bh_state(bh);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        jbd_unlock_bh_state(bh);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
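
/*
 * Illustrative use (a sketch; the caller is hypothetical): a bitmap
 * buffer about to have bits cleared for a delete takes undo access so
 * that jbd2 preserves the committed copy in jh->b_committed_data; the
 * allocator can then consult that copy to avoid reusing blocks whose
 * deallocation has not yet committed:
 *
 *      err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *      if (err)
 *              goto fail;
 *      ... clear bits in bitmap_bh->b_data ...
 *      err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */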

/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = bh2jh(bh);

        jh->b_triggers = type;
}

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_frozen)
                return;

        triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}
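
/*
 * Illustrative trigger definition (a sketch; the callback body and the
 * names are hypothetical).  A client can recompute a per-block checksum
 * in t_frozen just before the buffer's frozen image is written to the
 * journal, matching the call made from do_get_write_access() above:
 *
 *      static void example_frozen(struct jbd2_buffer_trigger_type *type,
 *                                 struct buffer_head *bh,
 *                                 void *mapped_data, size_t size)
 *      {
 *              ... recompute checksum over mapped_data[0..size) ...
 *      }
 *
 *      static struct jbd2_buffer_trigger_type example_triggers = {
 *              .t_frozen = example_frozen,
 *      };
 *
 *      jbd2_journal_set_triggers(bh, &example_triggers);
 */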

/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = bh2jh(bh);

        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");
        if (is_handle_aborted(handle))
                goto out;

        jbd_lock_bh_state(bh);

        if (jh->b_modified == 0) {
                /*
                 * This buffer has been modified and is becoming part of
                 * the transaction.  This needs to be done once per
                 * transaction -bzzz
                 */
                jh->b_modified = 1;
                J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
                handle->h_buffer_credits--;
        }

        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_running_transaction);
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);
                J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        jbd_unlock_bh_state(bh);
out:
        JBUFFER_TRACE(jh, "exit");
        return 0;
}

/*
 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 */
void
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
        BUFFER_TRACE(bh, "entry");
}

/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;

        BUFFER_TRACE(bh, "entry");

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        if (!buffer_jbd(bh))
                goto not_jbd;
        jh = bh2jh(bh);

        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
        if (!J_EXPECT_JH(jh, !jh->b_committed_data,
                         "inconsistent data on disk")) {
                err = -EIO;
                goto not_jbd;
        }

        /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;

        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
         */
        jh->b_modified = 0;

        if (jh->b_transaction == handle->h_transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);

                /* If we are forgetting a buffer which is already part
                 * of this transaction, then we can just drop it from
                 * the transaction immediately. */
                clear_buffer_dirty(bh);
                clear_buffer_jbddirty(bh);

                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

                /*
                 * we only want to drop a reference if this transaction
                 * modified the buffer
                 */
                if (was_modified)
                        drop_reserve = 1;

                /*
                 * We are no longer going to journal this buffer.
                 * However, the commit of this transaction is still
                 * important to the buffer: the delete that we are now
                 * processing might obsolete an old log entry, so by
                 * committing, we can satisfy the buffer's checkpoint.
                 *
                 * So, if we have a checkpoint on the buffer, we should
                 * now refile the buffer on our BJ_Forget list so that
                 * we know to remove the checkpoint after we commit.
                 */

                if (jh->b_cp_transaction) {
                        __jbd2_journal_temp_unlink_buffer(jh);
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
                        jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                __bforget(bh);
                                goto drop;
                        }
                }
        } else if (jh->b_transaction) {
                J_ASSERT_JH(jh, (jh->b_transaction ==
                                 journal->j_committing_transaction));
                /* However, if the buffer is still owned by a prior
                 * (committing) transaction, we can't drop it yet... */
                JBUFFER_TRACE(jh, "belongs to older transaction");
                /* ... but we CAN drop it from the new transaction if we
                 * have also modified it since the original commit. */

                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;

                        /*
                         * only drop a reference if this transaction
                         * modified the buffer
                         */
                        if (was_modified)
                                drop_reserve = 1;
                }
        }

not_jbd:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        __brelse(bh);
drop:
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_buffer_credits++;
        }
        return err;
}

/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int err;
        pid_t pid;

        J_ASSERT(journal_current_handle() == handle);

        if (is_handle_aborted(handle))
                err = -EIO;
        else {
                J_ASSERT(transaction->t_updates > 0);
                err = 0;
        }

        if (--handle->h_ref > 0) {
                jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
                          handle->h_ref);
                return err;
        }

        jbd_debug(4, "Handle %p going down\n", handle);

        /*
         * Implement synchronous transaction batching.  If the handle
         * was synchronous, don't force a commit immediately.  Let's
         * yield and let another thread piggyback onto this
         * transaction.  Keep doing that while new threads continue to
         * arrive.  It doesn't cost much - we're about to run a commit
         * and sleep on IO anyway.  Speeds up many-threaded, many-dir
         * operations by 30x or more...
         *
         * We try and optimize the sleep time against what the
         * underlying disk can do, instead of having a static sleep
         * time.  This is useful for the case where our storage is so
         * fast that it is more optimal to go ahead and force a flush
         * and wait for the transaction to be committed than it is to
         * wait for an arbitrary amount of time for new writers to
         * join the transaction.  We achieve this by measuring how
         * long it takes to commit a transaction, and compare it with
         * how long this transaction has been running, and if run time
         * < commit time then we sleep for the delta and commit.  This
         * greatly helps super fast disks that would see slowdowns as
         * more threads started doing fsyncs.
         *
         * But don't do this if this process was the most recent one
         * to perform a synchronous write.  We do this to detect the
         * case where a single process is doing a stream of sync
         * writes.  No point in waiting for joiners in that case.
         */
        pid = current->pid;
        if (handle->h_sync && journal->j_last_sync_writer != pid) {
                u64 commit_time, trans_time;

                journal->j_last_sync_writer = pid;

                spin_lock(&journal->j_state_lock);
                commit_time = journal->j_average_commit_time;
                spin_unlock(&journal->j_state_lock);

                trans_time = ktime_to_ns(ktime_sub(ktime_get(),
                                                   transaction->t_start_time));

                commit_time = max_t(u64, commit_time,
                                    1000*journal->j_min_batch_time);
                commit_time = min_t(u64, commit_time,
                                    1000*journal->j_max_batch_time);

                if (trans_time < commit_time) {
                        ktime_t expires = ktime_add_ns(ktime_get(),
                                                       commit_time);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
                }
        }

        if (handle->h_sync)
                transaction->t_synchronous_commit = 1;
        current->journal_info = NULL;
        spin_lock(&transaction->t_handle_lock);
        transaction->t_outstanding_credits -= handle->h_buffer_credits;
        transaction->t_updates--;
        if (!transaction->t_updates) {
                wake_up(&journal->j_wait_updates);
                if (journal->j_barrier_count)
                        wake_up(&journal->j_wait_transaction_locked);
        }

        /*
         * If the handle is marked SYNC, we need to set another commit
         * going!  We also want to force a commit if the current
         * transaction is occupying too much of the log, or if the
         * transaction is too old now.
         */
        if (handle->h_sync ||
                        transaction->t_outstanding_credits >
                                journal->j_max_transaction_buffers ||
                        time_after_eq(jiffies, transaction->t_expires)) {
                /* Do this even for aborted journals: an abort still
                 * completes the commit thread, it just doesn't write
                 * anything to disk. */
                tid_t tid = transaction->t_tid;

                spin_unlock(&transaction->t_handle_lock);
                jbd_debug(2, "transaction too old, requesting commit for "
                                        "handle %p\n", handle);
                /* This is non-blocking */
                jbd2_log_start_commit(journal, transaction->t_tid);

                /*
                 * Special case: JBD2_SYNC synchronous updates require us
                 * to wait for the commit to complete.
                 */
                if (handle->h_sync && !(current->flags & PF_MEMALLOC))
                        err = jbd2_log_wait_commit(journal, tid);
        } else {
                spin_unlock(&transaction->t_handle_lock);
        }

        lock_map_release(&handle->h_lockdep_map);

        jbd2_free_handle(handle);
        return err;
}

/**
 * int jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
        handle_t *handle;
        int ret;

        handle = jbd2_journal_start(journal, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
        } else {
                handle->h_sync = 1;
                ret = jbd2_journal_stop(handle);
        }
        return ret;
}

/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
        if (!*list) {
                jh->b_tnext = jh->b_tprev = jh;
                *list = jh;
        } else {
                /* Insert at the tail of the list to preserve order */
                struct journal_head *first = *list, *last = first->b_tprev;
                jh->b_tprev = last;
                jh->b_tnext = first;
                last->b_tnext = first->b_tprev = jh;
        }
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
        if (*list == jh) {
                *list = jh->b_tnext;
                if (*list == jh)
                        *list = NULL;
        }
        jh->b_tprev->b_tnext = jh->b_tnext;
        jh->b_tnext->b_tprev = jh->b_tprev;
}

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
 * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
 * of these pointers, it could go bad.  Generally the caller needs to re-read
 * the pointer from the transaction_t.
 *
 * Called under j_list_lock.  The journal may not be locked.
 */
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
        struct journal_head **list = NULL;
        transaction_t *transaction;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        transaction = jh->b_transaction;
        if (transaction)
                assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        if (jh->b_jlist != BJ_None)
                J_ASSERT_JH(jh, transaction != NULL);

        switch (jh->b_jlist) {
        case BJ_None:
                return;
        case BJ_Metadata:
                transaction->t_nr_buffers--;
                J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_IO:
                list = &transaction->t_iobuf_list;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_LogCtl:
                list = &transaction->t_log_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        }

        __blist_del_buffer(list, jh);
        jh->b_jlist = BJ_None;
        if (test_clear_buffer_jbddirty(bh))
                mark_buffer_dirty(bh);  /* Expose it to the VM */
}

void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
}

void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}

/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
        struct journal_head *jh;

        jh = bh2jh(bh);

        if (buffer_locked(bh) || buffer_dirty(bh))
                goto out;

        if (jh->b_next_transaction != NULL)
                goto out;

        spin_lock(&journal->j_list_lock);
        if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
                /* written-back checkpointed metadata buffer */
                if (jh->b_jlist == BJ_None) {
                        JBUFFER_TRACE(jh, "remove from checkpoint list");
                        __jbd2_journal_remove_checkpoint(jh);
                        jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                }
        }
        spin_unlock(&journal->j_list_lock);
out:
        return;
}

/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard should we try to release
 * buffers. If __GFP_WAIT and __GFP_FS is set, we wait for commit code to
 * release the buffers.
 *
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
                                struct page *page, gfp_t gfp_mask)
{
        struct buffer_head *head;
        struct buffer_head *bh;
        int ret = 0;

        J_ASSERT(PageLocked(page));

        head = page_buffers(page);
        bh = head;
        do {
                struct journal_head *jh;

                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
                 * jbd2_journal_remove_journal_head() and
                 * jbd2_journal_put_journal_head().
                 */
                jh = jbd2_journal_grab_journal_head(bh);
                if (!jh)
                        continue;

                jbd_lock_bh_state(bh);
                __journal_try_to_free_buffer(journal, bh);
                jbd2_journal_put_journal_head(jh);
                jbd_unlock_bh_state(bh);
                if (buffer_jbd(bh))
                        goto busy;
        } while ((bh = bh->b_this_page) != head);

        ret = try_to_free_buffers(page);

busy:
        return ret;
}

/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);

        __jbd2_journal_unfile_buffer(jh);

        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
                /*
                 * We don't want to write the buffer anymore, clear the
                 * bit so that we don't confuse checks in
                 * __journal_file_buffer
                 */
                clear_buffer_dirty(bh);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
                jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        }
        return may_free;
}

/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
        transaction_t *transaction;
        struct journal_head *jh;
        int may_free = 1;
        int ret;

        BUFFER_TRACE(bh, "entry");

        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
         * holding the page lock. --sct
         */

        if (!buffer_jbd(bh))
                goto zap_buffer_unlocked;

        /* OK, we have data buffer in journaled mode */
        spin_lock(&journal->j_state_lock);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        jh = jbd2_journal_grab_journal_head(bh);
        if (!jh)
                goto zap_buffer_no_jh;

        /*
         * We cannot remove the buffer from checkpoint lists until the
         * transaction adding inode to orphan list (let's call it T)
         * is committed.  Otherwise if the transaction changing the
         * buffer would be cleaned from the journal before T is
         * committed, a crash will cause the correct contents of the
         * buffer to be lost.  On the other hand we have to
         * clear the buffer dirty bit at latest at the moment when the
         * transaction marking the buffer as freed in the filesystem
         * structures is committed because from that moment on the
         * buffer can be reallocated and used by a different page.
         * Since the block hasn't been freed yet but the inode has
         * already been added to orphan list, it is safe for us to add
         * the buffer to BJ_Forget list of the newest transaction.
         */
        transaction = jh->b_transaction;
        if (transaction == NULL) {
                /* First case: not on any transaction.  If it
                 * has no checkpoint link, then we can zap it:
                 * it's a writeback-mode buffer so we don't care
                 * if it hits disk safely. */
                if (!jh->b_cp_transaction) {
                        JBUFFER_TRACE(jh, "not on any transaction: zap");
                        goto zap_buffer;
                }

                if (!buffer_dirty(bh)) {
                        /* bdflush has written it.  We can drop it now */
                        goto zap_buffer;
                }

                /* OK, it must be in the journal but still not
                 * written fully to disk: it's metadata or
                 * journaled data... */

                if (journal->j_running_transaction) {
                        /* ... and once the current transaction has
                         * committed, the buffer won't be needed any
                         * longer. */
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
                        ret = __dispose_buffer(jh,
                                        journal->j_running_transaction);
                        jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        spin_unlock(&journal->j_state_lock);
                        return ret;
                } else {
                        /* There is no currently-running transaction. So the
                         * orphan record which we wrote for this file must have
                         * passed into commit.  We must attach this buffer to
                         * the committing transaction, if it exists. */
                        if (journal->j_committing_transaction) {
                                JBUFFER_TRACE(jh, "give to committing trans");
                                ret = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
                                jbd2_journal_put_journal_head(jh);
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                spin_unlock(&journal->j_state_lock);
                                return ret;
                        } else {
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
                                clear_buffer_jbddirty(bh);
                                goto zap_buffer;
                        }
                }
        } else if (transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "on committing transaction");
                /*
                 * The buffer is committing, we simply cannot touch
                 * it. So we just set j_next_transaction to the
                 * running transaction (if there is one) and mark
                 * buffer as freed so that commit code knows it should
                 * clear dirty bits when it is done with the buffer.
                 */
                set_buffer_freed(bh);
                if (journal->j_running_transaction && buffer_jbddirty(bh))
                        jh->b_next_transaction = journal->j_running_transaction;
                jbd2_journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                spin_unlock(&journal->j_state_lock);
                return 0;
        } else {
                /* Good, the buffer belongs to the running transaction.
                 * We are writing our own transaction's data, not any
                 * previous one's, so it is safe to throw it away
                 * (remember that we expect the filesystem to have set
                 * i_size already for this truncate so recovery will not
                 * expose the disk blocks we are discarding here.) */
                J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
                JBUFFER_TRACE(jh, "on running transaction");
                may_free = __dispose_buffer(jh, transaction);
        }

zap_buffer:
        jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
        clear_buffer_dirty(bh);
        J_ASSERT_BH(bh, !buffer_jbddirty(bh));
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        bh->b_bdev = NULL;
        return may_free;
}

/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  length of page to invalidate.
 *
 * Reap page buffers containing data after offset in page.
 *
 */
void jbd2_journal_invalidatepage(journal_t *journal,
                      struct page *page,
                      unsigned long offset)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int may_free = 1;

        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
                return;

        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */

        head = bh = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
                        may_free &= journal_unmap_buffer(journal, bh);
                        unlock_buffer(bh);
                }
                curr_off = next_off;
                bh = next;

        } while (bh != head);

        if (offset == 0) {
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
}

/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
                        transaction_t *transaction, int jlist)
{
        struct journal_head **list = NULL;
        int was_dirty = 0;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_transaction == NULL);

        if (jh->b_transaction && jh->b_jlist == jlist)
                return;

        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
                /*
                 * For metadata buffers, we track dirty bit in buffer_jbddirty
                 * instead of buffer_dirty. We should not see a dirty bit set
                 * here because we clear it in do_get_write_access but e.g.
                 * tune2fs can modify the sb and set the dirty bit at any time
                 * so we try to gracefully handle that.
                 */
                if (buffer_dirty(bh))
                        warn_dirty_buffer(bh);
                if (test_clear_buffer_dirty(bh) ||
                    test_clear_buffer_jbddirty(bh))
                        was_dirty = 1;
        }

        if (jh->b_transaction)
                __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = transaction;

        switch (jlist) {
        case BJ_None:
                J_ASSERT_JH(jh, !jh->b_committed_data);
                J_ASSERT_JH(jh, !jh->b_frozen_data);
                return;
        case BJ_Metadata:
                transaction->t_nr_buffers++;
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_IO:
                list = &transaction->t_iobuf_list;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_LogCtl:
                list = &transaction->t_log_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        }

        __blist_add_buffer(list, jh);
        jh->b_jlist = jlist;

        if (was_dirty)
                set_buffer_jbddirty(bh);
}

void jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&transaction->t_journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under journal->j_list_lock
 *
 * Called under jbd_lock_bh_state(jh2bh(jh))
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
        int was_dirty, jlist;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        if (jh->b_transaction)
                assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

        /* If the buffer is now unused, just drop it. */
        if (jh->b_next_transaction == NULL) {
                __jbd2_journal_unfile_buffer(jh);
                return;
        }

        /*
         * It has been modified by a later transaction: add it to the new
         * transaction's metadata list.
         */

        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        if (buffer_freed(bh))
                jlist = BJ_Forget;
        else if (jh->b_modified)
                jlist = BJ_Metadata;
        else
                jlist = BJ_Reserved;
        __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

        if (was_dirty)
                set_buffer_jbddirty(bh);
}

/*
 * For the unlocked version of this call, also make sure that any
 * hanging journal_head is cleaned up if necessary.
 *
 * __jbd2_journal_refile_buffer is usually called as part of a single locked
 * operation on a buffer_head, in which the caller is probably going to
 * be hooking the journal_head onto other lists.  In that case it is up
 * to the caller to remove the journal_head if necessary.  For the
 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
        jbd2_journal_remove_journal_head(bh);

        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
}

/*
 * File inode in the inode list of the handle's transaction
 */
int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;

        if (is_handle_aborted(handle))
                return -EIO;

        jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
                        transaction->t_tid);

        /*
         * First check whether inode isn't already on the transaction's
         * lists without taking the lock. Note that this check is safe
         * without the lock as we cannot race with somebody removing inode
         * from the transaction. The reason is that we remove inode from the
         * transaction only in journal_release_jbd_inode() and when we commit
         * the transaction. We are guarded from the first case by holding
         * a reference to the inode. We are safe against the second case
         * because if jinode->i_transaction == transaction, commit code
         * cannot touch the transaction because we hold reference to it,
         * and if jinode->i_next_transaction == transaction, commit code
         * will only file the inode where we want it.
         */
        if (jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction)
                return 0;

        spin_lock(&journal->j_list_lock);

        if (jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction)
                goto done;

        /* On some different transaction's list - should be
         * the committing one */
        if (jinode->i_transaction) {
                J_ASSERT(jinode->i_next_transaction == NULL);
                J_ASSERT(jinode->i_transaction ==
                                        journal->j_committing_transaction);
                jinode->i_next_transaction = transaction;
                goto done;
        }
        /* Not on any transaction list... */
        J_ASSERT(!jinode->i_next_transaction);
        jinode->i_transaction = transaction;
        list_add(&jinode->i_list, &transaction->t_inode_list);
done:
        spin_unlock(&journal->j_list_lock);

        return 0;
}
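
/*
 * Illustrative call pattern for the above (a sketch; the surrounding
 * write path and the &ei->jinode field are hypothetical): an
 * ordered-mode filesystem associates an inode's data with the running
 * transaction before dirtying page data under the handle, so that
 * commit can write the data out before the metadata:
 *
 *      err = jbd2_journal_file_inode(handle, &ei->jinode);
 *      if (err)
 *              goto fail;
 *      ... dirty the inode's pages for this update ...
 */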

/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on orphan list with the new inode size. The second condition
 * avoids the race that someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to orphan list
 * happens in the same transaction as write --- we don't have to write
 * any data in such case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
                                        struct jbd2_inode *jinode,
                                        loff_t new_size)
{
        transaction_t *inode_trans, *commit_trans;
        int ret = 0;

        /* This is a quick check to avoid locking if not necessary */
        if (!jinode->i_transaction)
                goto out;
        /* Locks are here just to force reading of recent values, it is
         * enough that the transaction was not committing before we started
         * a transaction adding the inode to orphan list */
        spin_lock(&journal->j_state_lock);
        commit_trans = journal->j_committing_transaction;
        spin_unlock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        inode_trans = jinode->i_transaction;
        spin_unlock(&journal->j_list_lock);
        if (inode_trans == commit_trans) {
                ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
                        new_size, LLONG_MAX);
                if (ret)
                        jbd2_journal_abort(journal, ret);
        }
out:
        return ret;
}