// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;

int __init jbd2_journal_init_transaction_cache(void)
{
	J_ASSERT(!transaction_cache);
	transaction_cache = kmem_cache_create("jbd2_transaction_s",
					sizeof(transaction_t),
					0,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					NULL);
	if (transaction_cache)
		return 0;
	return -ENOMEM;
}

void jbd2_journal_destroy_transaction_cache(void)
{
	kmem_cache_destroy(transaction_cache);
	transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
	if (unlikely(ZERO_OR_NULL_PTR(transaction)))
		return;
	kmem_cache_free(transaction_cache, transaction);
}

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits,
		   atomic_read(&journal->j_reserved_credits));
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;
	transaction->t_requested = 0;

	return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock. But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability. So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}

/*
 * Wait until running transaction passes T_LOCKED state. Also starts the commit
 * if needed. The function expects running transaction to exist and releases
 * j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);
	int need_to_start;
	tid_t tid = journal->j_running_transaction->t_tid;

	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	jbd2_might_wait_for_commit(journal);
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
	atomic_sub(blocks, &journal->j_reserved_credits);
	wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction.  Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * the caller must retry.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
				   int rsv_blocks)
{
	transaction_t *t = journal->j_running_transaction;
	int needed;
	int total = blocks + rsv_blocks;

	/*
	 * If the current transaction is locked down for commit, wait
	 * for the lock to be released.
	 */
	if (t->t_state == T_LOCKED) {
		wait_transaction_locked(journal);
		return 1;
	}

	/*
	 * If there is not enough space left in the log to write all
	 * potential buffers requested by this operation, we need to
	 * stall pending a log checkpoint to free some more log space.
	 */
	needed = atomic_add_return(total, &t->t_outstanding_credits);
	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large,
		 * then start to commit it: we can then go back and
		 * attach this handle to a new transaction.
		 */
		atomic_sub(total, &t->t_outstanding_credits);

		/*
		 * Is the number of reserved credits in the current transaction too
		 * big to fit this handle? Wait until reserved credits are freed.
		 */
		if (atomic_read(&journal->j_reserved_credits) + total >
		    journal->j_max_transaction_buffers) {
			read_unlock(&journal->j_state_lock);
			jbd2_might_wait_for_commit(journal);
			wait_event(journal->j_wait_reserved,
				   atomic_read(&journal->j_reserved_credits) + total <=
				   journal->j_max_transaction_buffers);
			return 1;
		}

		wait_transaction_locked(journal);
		return 1;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 */
	if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		write_lock(&journal->j_state_lock);
		if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		return 1;
	}

	/* No reservation? We are done... */
	if (!rsv_blocks)
		return 0;

	needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
	/* We allow at most half of a transaction to be reserved */
	if (needed > journal->j_max_transaction_buffers / 2) {
		sub_reserved_credits(journal, rsv_blocks);
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) + rsv_blocks
			   <= journal->j_max_transaction_buffers / 2);
		return 1;
	}
	return 0;
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	int		blocks = handle->h_buffer_credits;
	int		rsv_blocks = 0;
	unsigned long ts = jiffies;

	if (handle->h_rsv_handle)
		rsv_blocks = handle->h_rsv_handle->h_buffer_credits;

	/*
	 * Limit the number of reserved credits to 1/2 of maximum transaction
	 * size and limit the number of total credits to not exceed maximum
	 * transaction size per operation.
	 */
	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
		printk(KERN_ERR "JBD2: %s wants too many credits "
		       "credits:%d rsv_credits:%d max:%d\n",
		       current->comm, blocks, rsv_blocks,
		       journal->j_max_transaction_buffers);
		WARN_ON(1);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		/*
		 * If __GFP_FS is not present, then we may be being called from
		 * inside the fs writeback layer, so we MUST NOT fail.
		 */
		if ((gfp_mask & __GFP_FS) == 0)
			gfp_mask |= __GFP_NOFAIL;
		new_transaction = kmem_cache_zalloc(transaction_cache,
						    gfp_mask);
		if (!new_transaction)
			return -ENOMEM;
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	/*
	 * Wait on the journal's transaction barrier if necessary. Specifically
	 * we allow reserved handles to proceed because otherwise commit could
	 * deadlock on page writeback not being able to complete.
	 */
	if (!handle->h_reserved && journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    (handle->h_reserved || !journal->j_barrier_count)) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	if (!handle->h_reserved) {
		/* We may have dropped j_state_lock - restart in that case */
		if (add_transaction_credits(journal, blocks, rsv_blocks))
			goto repeat;
	} else {
		/*
		 * We have handle reserved so we are allowed to join T_LOCKED
		 * transaction and we don't have to check for transaction size
		 * and journal space.
		 */
		sub_reserved_credits(journal, blocks);
		handle->h_reserved = 0;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	handle->h_requested_credits = blocks;
	handle->h_start_jiffies = jiffies;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
		  handle, blocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);
	current->journal_info = handle;

	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
	jbd2_journal_free_transaction(new_transaction);
	/*
	 * Ensure that no allocations done while the transaction is open are
	 * going to recurse back to the fs layer.
	 */
	handle->saved_alloc_context = memalloc_nofs_save();
	return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
			      gfp_t gfp_mask, unsigned int type,
			      unsigned int line_no)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	if (rsv_blocks) {
		handle_t *rsv_handle;

		rsv_handle = new_handle(rsv_blocks);
		if (!rsv_handle) {
			jbd2_free_handle(handle);
			return ERR_PTR(-ENOMEM);
		}
		rsv_handle->h_reserved = 1;
		rsv_handle->h_journal = journal;
		handle->h_rsv_handle = rsv_handle;
	}

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		if (handle->h_rsv_handle)
			jbd2_free_handle(handle->h_rsv_handle);
		jbd2_free_handle(handle);
		return ERR_PTR(err);
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, nblocks);

	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose the reserved one. Reserved handle has to
 * be converted to a normal handle using jbd2_journal_start_reserved() before
 * it is used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
void jbd2_journal_free_reserved(handle_t *handle)
{
	journal_t *journal = handle->h_journal;

	WARN_ON(!handle->h_reserved);
	sub_reserved_credits(journal, handle->h_buffer_credits);
	jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * int jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start handle that has been previously reserved with jbd2_journal_reserve().
 * This attaches @handle to the running transaction (or creates one if there's
 * no transaction running). Unlike jbd2_journal_start() this function cannot
 * block on journal commit, checkpointing, or similar stuff. It can block on
 * memory allocation or frozen journal though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
				unsigned int line_no)
{
	journal_t *journal = handle->h_journal;
	int ret = -EIO;

	if (WARN_ON(!handle->h_reserved)) {
		/* Someone passed in normal handle? Just stop it. */
		jbd2_journal_stop(handle);
		return ret;
	}
	/*
	 * Usefulness of mixing of reserved and unreserved handles is
	 * questionable. So far nobody seems to need it so just error out.
	 */
	if (WARN_ON(current->journal_info)) {
		jbd2_journal_free_reserved(handle);
		return ret;
	}

	handle->h_journal = NULL;
	/*
	 * GFP_NOFS is here because callers are likely from writeback or
	 * similarly constrained call sites
	 */
	ret = start_this_handle(journal, handle, GFP_NOFS);
	if (ret < 0) {
		handle->h_journal = journal;
		jbd2_journal_free_reserved(handle);
		return ret;
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that the allocation will succeed - this is
 * best-effort only.  The calling process MUST be able to deal cleanly with
 * a failure to extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int result;
	int wanted;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto unlock;
	}

	if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
	    jbd2_log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto unlock;
	}

	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
				 transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_buffer_credits,
				 nblocks);

	handle->h_buffer_credits += nblocks;
	handle->h_requested_credits += nblocks;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	read_unlock(&journal->j_state_lock);
	return result;
}

/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve reserved handle if there's any attached to the
 * passed in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	tid_t		tid;
	int		need_to_start, ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;
	journal = transaction->t_journal;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	J_ASSERT(journal_current_handle() == handle);

	read_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);
	if (handle->h_rsv_handle) {
		sub_reserved_credits(journal,
				     handle->h_rsv_handle->h_buffer_credits);
	}
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);
	tid = transaction->t_tid;
	spin_unlock(&transaction->t_handle_lock);
	handle->h_transaction = NULL;
	current->journal_info = NULL;

	jbd_debug(2, "restarting handle %p\n", handle);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);

	rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
	handle->h_buffer_credits = nblocks;
	/*
	 * Restore the original nofs context because the journal restart
	 * is basically the same thing as journal stop and start.
	 * start_this_handle will start a new nofs context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
	ret = start_this_handle(journal, handle, gfp_mask);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	jbd2_might_wait_for_commit(journal);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no reserved handles */
	if (atomic_read(&journal->j_reserved_credits)) {
		write_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) == 0);
		write_lock(&journal->j_state_lock);
	}

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;
		DEFINE_WAIT(wait);

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&transaction->t_updates)) {
			spin_unlock(&transaction->t_handle_lock);
			finish_wait(&journal->j_wait_updates, &wait);
			break;
		}
		spin_unlock(&transaction->t_handle_lock);
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
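
/*
 * Example (illustrative sketch): the barrier calls bracket operations
 * that need a quiescent journal, e.g. a filesystem-wide state change.
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no new handles can start and none are running here ...
 *	jbd2_journal_unlock_updates(journal);
 */
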
static void warn_dirty_buffer(struct buffer_head *bh)
{
	printk(KERN_WARNING
	       "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu).  "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
	struct page *page;
	int offset;
	char *source;
	struct buffer_head *bh = jh2bh(jh);

	J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
	page = bh->b_page;
	offset = offset_in_page(bh->b_data);
	source = kmap_atomic(page);
	/* Fire data frozen trigger just before we copy the data */
	jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
	memcpy(jh->b_frozen_data, source + offset, bh->b_size);
	kunmap_atomic(source);

	/*
	 * Now that the frozen data is saved off, we need to store any matching
	 * triggers.
	 */
	jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	unsigned long start_lock, time_lock;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */
	start_lock = jiffies;
	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* If it takes too long to lock the buffer, trace it */
	time_lock = jbd2_time_diff(start_lock, jiffies);
	if (time_lock > HZ/10)
		trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
			jiffies_to_msecs(time_lock));

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If the buffer is not journaled right now, we need to make sure it
	 * doesn't get written to disk before the caller actually commits the
	 * new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		/*
		 * Make sure all stores to jh (b_modified, b_frozen_data) are
		 * visible before attaching it to the running transaction.
		 * Paired with barrier in jbd2_write_access_granted()
		 */
		smp_wmb();
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
		goto done;
	}
	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		goto attach_next;
	}

	JBUFFER_TRACE(jh, "owned by older transaction");
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

	/*
	 * There is one case we have to be very careful about.  If the
	 * committing transaction is currently writing this buffer out to disk
	 * and has NOT made a copy-out, then we cannot modify the buffer
	 * contents at all right now.  The essence of copy-out is that it is
	 * the extra copy, not the primary copy, which gets journaled.  If the
	 * primary copy is already going to disk then we cannot do copy-out
	 * here.
	 */
	if (buffer_shadow(bh)) {
		JBUFFER_TRACE(jh, "on shadow: sleep");
		jbd_unlock_bh_state(bh);
		wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/*
	 * Only do the copy if the currently-owning transaction still needs it.
	 * If buffer isn't on BJ_Metadata list, the committing transaction is
	 * past that stage (here we use the fact that BH_Shadow is set under
	 * bh_state lock together with refiling to BJ_Shadow list and at this
	 * point we know the buffer doesn't have BH_Shadow set).
	 *
	 * Subtle point, though: if this is a get_undo_access, then we will be
	 * relying on the frozen_data to contain the new value of the
	 * committed_data record after the transaction, so we HAVE to force the
	 * frozen_data copy in that case.
	 */
	if (jh->b_jlist == BJ_Metadata || force_copy) {
		JBUFFER_TRACE(jh, "generate frozen data");
		if (!frozen_buffer) {
			JBUFFER_TRACE(jh, "allocate memory for buffer");
			jbd_unlock_bh_state(bh);
			frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			goto repeat;
		}
		jh->b_frozen_data = frozen_buffer;
		frozen_buffer = NULL;
		jbd2_freeze_jh_data(jh);
	}
attach_next:
	/*
	 * Make sure all stores to jh (b_modified, b_frozen_data) are visible
	 * before attaching it to the running transaction. Paired with barrier
	 * in jbd2_write_access_granted()
	 */
	smp_wmb();
	jh->b_next_transaction = transaction;

done:
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * relevant to the new running transaction, not to the old one which is
	 * being committed.
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
				      bool undo)
{
	struct journal_head *jh;
	bool ret = false;

	/* Dirty buffers require special handling... */
	if (buffer_dirty(bh))
		return false;

	/*
	 * RCU protects us from dereferencing freed pages. So the checks we do
	 * are guaranteed not to oops. However the jh slab object can get freed
	 * & reallocated while we work with it. So we have to be careful. When
	 * we see jh attached to the running transaction, we know it must stay
	 * so until the transaction is committed. Thus jh won't be freed and
	 * will be attached to the same bh while we run. However it can
	 * happen jh gets freed, reallocated, and attached to the transaction
	 * just after we get pointer to it from bh. So we have to be careful
	 * and recheck jh still belongs to our bh before we return success.
	 */
	rcu_read_lock();
	if (!buffer_jbd(bh))
		goto out;
	/* This should be bh2jh() but that doesn't work with inline functions */
	jh = READ_ONCE(bh->b_private);
	if (!jh)
		goto out;
	/* For undo access buffer must have data copied */
	if (undo && !jh->b_committed_data)
		goto out;
	if (jh->b_transaction != handle->h_transaction &&
	    jh->b_next_transaction != handle->h_transaction)
		goto out;
	/*
	 * There are two reasons for the barrier here:
	 * 1) Make sure to fetch b_bh after we did previous checks so that we
	 * detect when jh went through free, realloc, attach to transaction
	 * while we were checking. Paired with implicit barrier in that path.
	 * 2) So that access to bh done after jbd2_write_access_granted()
	 * doesn't get reordered and see inconsistent state of concurrent
	 * do_get_write_access().
	 */
	smp_mb();
	if (unlikely(jh->b_bh != bh))
		goto out;
	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh;
	int rc;

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}
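
/*
 * Example (illustrative sketch): write access must be requested before
 * the buffer contents are changed, and jbd2_journal_dirty_metadata()
 * must follow the change; bh, val and my_disk_struct are assumptions
 * for the example.
 *
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (err)
 *		return err;
 *	((struct my_disk_struct *)bh->b_data)->field = cpu_to_le32(val);
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 */
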
/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	journal = transaction->t_journal;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		spin_lock(&journal->j_list_lock);
		jh->b_next_transaction = transaction;
		spin_unlock(&journal->j_list_lock);
	}
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and the reallocating as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}
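
/*
 * Example (illustrative sketch): a freshly allocated metadata block is
 * obtained with sb_getblk(), kept locked while it is initialised, and
 * announced with jbd2_journal_get_create_access(); sb and blocknr are
 * assumptions for the example.
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	if (!err) {
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 */
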
/**
 * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh;
	char *committed_data = NULL;

	if (jbd2_write_access_granted(handle, bh, true))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data)
		committed_data = jbd2_alloc(jh2bh(jh)->b_size,
					    GFP_NOFS|__GFP_NOFAIL);

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
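
/*
 * Example (illustrative sketch): bitmap updates that must stay
 * rewindable until the deallocation commits use undo access instead of
 * plain write access; bitmap_bh and bit are assumptions for the example.
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		return err;
 *	clear_bit_le(bit, bitmap_bh->b_data);
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */
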
/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

	if (WARN_ON(!jh))
		return;
	jh->b_triggers = type;
	jbd2_journal_put_journal_head(jh);
}

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_frozen)
		return;

	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}
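
/*
 * Example (illustrative sketch): a filesystem that checksums its
 * metadata can hook t_frozen so the copy that goes to the journal gets
 * a fresh checksum; my_frozen and my_triggers are assumptions modelled
 * on struct jbd2_buffer_trigger_type (as used by e.g. OCFS2).
 *
 *	static void my_frozen(struct jbd2_buffer_trigger_type *type,
 *			      struct buffer_head *bh, void *mapped_data,
 *			      size_t size)
 *	{
 *		... recompute the checksum over mapped_data[0..size) ...
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */
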
/**
 * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	if (!buffer_jbd(bh)) {
		ret = -EUCLEAN;
		goto out;
	}
	/*
	 * We don't grab jh reference here since the buffer must be part
	 * of the running transaction.
	 */
	jh = bh2jh(bh);
	/*
	 * This and the following assertions are unreliable since we may see jh
	 * in inconsistent state unless we grab bh_state lock. But this is
	 * crucial to catch bugs so let's do a reliable check until the
	 * lockless handling is fully proven.
	 */
	if (jh->b_transaction != transaction &&
	    jh->b_next_transaction != transaction) {
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_next_transaction == transaction);
		jbd_unlock_bh_state(bh);
	}
	if (jh->b_modified == 1) {
		/* If it's in our transaction it must be in BJ_Metadata list. */
		if (jh->b_transaction == transaction &&
		    jh->b_jlist != BJ_Metadata) {
			jbd_lock_bh_state(bh);
			if (jh->b_transaction == transaction &&
			    jh->b_jlist != BJ_Metadata)
				pr_err("JBD2: assertion failure: h_type=%u "
				       "h_line_no=%u block_no=%llu jlist=%u\n",
				       handle->h_type, handle->h_line_no,
				       (unsigned long long) bh->b_blocknr,
				       jh->b_jlist);
			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
					jh->b_jlist == BJ_Metadata);
			jbd_unlock_bh_state(bh);
		}
		goto out;
	}

	journal = transaction->t_journal;
	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer's got modified and becoming part
		 * of the transaction. This needs to be done
		 * once a transaction -bzzz
		 */
		if (handle->h_buffer_credits <= 0) {
			ret = -ENOSPC;
			goto out_unlock_bh;
		}
		jh->b_modified = 1;
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		if (unlikely(jh->b_transaction !=
			     journal->j_running_transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_running_transaction (%p, %u)\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_running_transaction,
			       journal->j_running_transaction ?
			       journal->j_running_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		if (unlikely(((jh->b_transaction !=
			       journal->j_committing_transaction)) ||
			     (jh->b_next_transaction != transaction))) {
			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
			       "bad jh for block %llu: "
			       "transaction (%p, %u), "
			       "jh->b_transaction (%p, %u), "
			       "jh->b_next_transaction (%p, %u), jlist %u\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       transaction, transaction->t_tid,
			       jh->b_transaction,
			       jh->b_transaction ?
			       jh->b_transaction->t_tid : 0,
			       jh->b_next_transaction,
			       jh->b_next_transaction ?
			       jh->b_next_transaction->t_tid : 0,
			       jh->b_jlist);
			WARN_ON(1);
			ret = -EINVAL;
		}
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
out:
	JBUFFER_TRACE(jh, "exit");
	return ret;
}

/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */
		spin_lock(&journal->j_list_lock);
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			spin_lock(&journal->j_list_lock);
			jh->b_next_transaction = NULL;
			spin_unlock(&journal->j_list_lock);

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}

/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int err = 0, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	if (!transaction) {
		/*
		 * Handle is already detached from the transaction so
		 * there is nothing to do other than decrease a refcount,
		 * or free the handle if refcount drops to zero
		 */
		if (--handle->h_ref > 0) {
			jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
				  handle->h_ref);
			return err;
		} else {
			if (handle->h_rsv_handle)
				jbd2_free_handle(handle->h_rsv_handle);
			goto free_and_exit;
		}
	}
	journal = transaction->t_journal;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else
		J_ASSERT(atomic_read(&transaction->t_updates) > 0);

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);
	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid,
				handle->h_type, handle->h_line_no,
				jiffies - handle->h_start_jiffies,
				handle->h_sync, handle->h_requested_credits,
				(handle->h_requested_credits -
				 handle->h_buffer_credits));

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this
	 * transaction.  Keep doing that while new threads continue to
	 * arrive.  It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try and optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time.  This is useful for the case where our storage is so
	 * fast that it is more optimal to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how
	 * long it takes to commit a transaction, and compare it with
	 * how long this transaction has been running, and if run time
	 * < commit time then we sleep for the delta and commit.  This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write.  We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes.  No point in waiting for joiners in that case.
	 *
	 * Setting max_batch_time to 0 disables this completely.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid &&
	    journal->j_max_batch_time) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
	    (atomic_read(&transaction->t_outstanding_credits) >
	     journal->j_max_transaction_buffers) ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
			  "handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, transaction->t_tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once we drop t_updates, if it goes to zero the transaction
	 * could start committing on us and eventually disappear.  So
	 * once we do this, we must not dereference transaction
	 * pointer again.
	 */
	tid = transaction->t_tid;
	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

	if (handle->h_rsv_handle)
		jbd2_journal_free_reserved(handle->h_rsv_handle);
free_and_exit:
	/*
	 * Scope of the GFP_NOFS context is over here and so we can restore the
	 * original alloc context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
	jbd2_free_handle(handle);
	return err;
}

/*
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
 * t_reserved_list.  If the caller is holding onto a copy of one of these
 * pointers, it could go bad.  Generally the caller needs to re-read the
 * pointer from the transaction_t.
 *
 * Called under j_list_lock.
 */
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (transaction && is_journal_aborted(transaction->t_journal))
		clear_buffer_jbddirty(bh);
	else if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

/*
 * Remove buffer from all transactions.
 *
 * Called with bh_state lock and j_list_lock
 *
 * jh and bh may be already freed when this function returns.
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
        jbd2_journal_put_journal_head(jh);
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        /* Get reference so that buffer cannot be freed before we unlock it */
        get_bh(bh);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        __brelse(bh);
}
/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
        struct journal_head *jh;

        jh = bh2jh(bh);

        if (buffer_locked(bh) || buffer_dirty(bh))
                goto out;

        if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
                goto out;

        spin_lock(&journal->j_list_lock);
        if (jh->b_cp_transaction != NULL) {
                /* written-back checkpointed metadata buffer */
                JBUFFER_TRACE(jh, "remove from checkpoint list");
                __jbd2_journal_remove_checkpoint(jh);
        }
        spin_unlock(&journal->j_list_lock);
out:
        return;
}
/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: the page to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS are set, we wait for commit
 * code to release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
                                struct page *page, gfp_t gfp_mask)
{
        struct buffer_head *head;
        struct buffer_head *bh;
        int ret = 0;

        J_ASSERT(PageLocked(page));

        head = page_buffers(page);
        bh = head;
        do {
                struct journal_head *jh;

                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
                 * jbd2_journal_put_journal_head().
                 */
                jh = jbd2_journal_grab_journal_head(bh);
                if (!jh)
                        continue;

                jbd_lock_bh_state(bh);
                __journal_try_to_free_buffer(journal, bh);
                jbd2_journal_put_journal_head(jh);
                jbd_unlock_bh_state(bh);
                if (buffer_jbd(bh))
                        goto busy;
        } while ((bh = bh->b_this_page) != head);

        ret = try_to_free_buffers(page);
busy:
        return ret;
}
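/*
 * Illustrative sketch of a hypothetical caller (compare ext4's
 * ->releasepage(); the function name below is a placeholder): a
 * filesystem typically forwards release requests here when the page may
 * carry journalled buffers:
 *
 *      static int example_releasepage(journal_t *journal, struct page *page,
 *                                     gfp_t wait)
 *      {
 *              if (PageChecked(page))
 *                      return 0;
 *              if (journal)
 *                      return jbd2_journal_try_to_free_buffers(journal,
 *                                                              page, wait);
 *              return try_to_free_buffers(page);
 *      }
 */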
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);

        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
                __jbd2_journal_temp_unlink_buffer(jh);
                /*
                 * We don't want to write the buffer anymore, clear the
                 * bit so that we don't confuse checks in
                 * __journal_file_buffer
                 */
                clear_buffer_dirty(bh);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
                __jbd2_journal_unfile_buffer(jh);
        }
        return may_free;
}
/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */
/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
                                int partial_page)
{
        transaction_t *transaction;
        struct journal_head *jh;
        int may_free = 1;

        BUFFER_TRACE(bh, "entry");

        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
         * holding the page lock. --sct
         */

        if (!buffer_jbd(bh))
                goto zap_buffer_unlocked;

        /* OK, we have data buffer in journaled mode */
        write_lock(&journal->j_state_lock);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        jh = jbd2_journal_grab_journal_head(bh);
        if (!jh)
                goto zap_buffer_no_jh;

        /*
         * We cannot remove the buffer from checkpoint lists until the
         * transaction adding inode to orphan list (let's call it T)
         * is committed.  Otherwise if the transaction changing the
         * buffer would be cleaned from the journal before T is
         * committed, a crash will cause that the correct contents of
         * the buffer will be lost.  On the other hand we have to
         * clear the buffer dirty bit at latest at the moment when the
         * transaction marking the buffer as freed in the filesystem
         * structures is committed because from that moment on the
         * block can be reallocated and used by a different page.
         * Since the block hasn't been freed yet but the inode has
         * already been added to orphan list, it is safe for us to add
         * the buffer to BJ_Forget list of the newest transaction.
         *
         * Also we have to clear buffer_mapped flag of a truncated buffer
         * because the buffer_head may be attached to the page straddling
         * i_size (can happen only when blocksize < pagesize) and thus the
         * buffer_head can be reused when the file is extended again. So we end
         * up keeping around invalidated buffers attached to transactions'
         * BJ_Forget list just to stop checkpointing code from cleaning up
         * the transaction this buffer was modified in.
         */
        transaction = jh->b_transaction;
        if (transaction == NULL) {
                /* First case: not on any transaction.  If it
                 * has no checkpoint link, then we can zap it:
                 * it's a writeback-mode buffer so we don't care
                 * if it hits disk safely. */
                if (!jh->b_cp_transaction) {
                        JBUFFER_TRACE(jh, "not on any transaction: zap");
                        goto zap_buffer;
                }

                if (!buffer_dirty(bh)) {
                        /* bdflush has written it.  We can drop it now */
                        __jbd2_journal_remove_checkpoint(jh);
                        goto zap_buffer;
                }

                /* OK, it must be in the journal but still not
                 * written fully to disk: it's metadata or
                 * journaled data... */

                if (journal->j_running_transaction) {
                        /* ... and once the current transaction has
                         * committed, the buffer won't be needed any
                         * longer. */
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
                        may_free = __dispose_buffer(jh,
                                        journal->j_running_transaction);
                        goto zap_buffer;
                } else {
                        /* There is no currently-running transaction. So the
                         * orphan record which we wrote for this file must have
                         * passed into commit.  We must attach this buffer to
                         * the committing transaction, if it exists. */
                        if (journal->j_committing_transaction) {
                                JBUFFER_TRACE(jh, "give to committing trans");
                                may_free = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
                                goto zap_buffer;
                        } else {
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
                                clear_buffer_jbddirty(bh);
                                __jbd2_journal_remove_checkpoint(jh);
                                goto zap_buffer;
                        }
                }
        } else if (transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "on committing transaction");
                /*
                 * The buffer is committing, we simply cannot touch
                 * it. If the page is straddling i_size we have to wait
                 * for commit and try again.
                 */
                if (partial_page) {
                        jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        write_unlock(&journal->j_state_lock);
                        return -EBUSY;
                }
                /*
                 * OK, buffer won't be reachable after truncate. We just set
                 * j_next_transaction to the running transaction (if there is
                 * one) and mark buffer as freed so that commit code knows it
                 * should clear dirty bits when it is done with the buffer.
                 */
                set_buffer_freed(bh);
                if (journal->j_running_transaction && buffer_jbddirty(bh))
                        jh->b_next_transaction = journal->j_running_transaction;
                jbd2_journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                write_unlock(&journal->j_state_lock);
                return 0;
        } else {
                /* Good, the buffer belongs to the running transaction.
                 * We are writing our own transaction's data, not any
                 * previous one's, so it is safe to throw it away
                 * (remember that we expect the filesystem to have set
                 * i_size already for this truncate so recovery will not
                 * expose the disk blocks we are discarding here.) */
                J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
                JBUFFER_TRACE(jh, "on running transaction");
                may_free = __dispose_buffer(jh, transaction);
        }

zap_buffer:
        /*
         * This is tricky. Although the buffer is truncated, it may be reused
         * if blocksize < pagesize and it is attached to the page straddling
         * EOF. Since the buffer might have been added to BJ_Forget list of the
         * running transaction, journal_get_write_access() won't clear
         * b_modified and credit accounting gets confused. So clear b_modified
         * here.
         */
        jh->b_modified = 0;
        jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
        clear_buffer_dirty(bh);
        J_ASSERT_BH(bh, !buffer_jbddirty(bh));
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        bh->b_bdev = NULL;
        return may_free;
}
/**
 * int jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 * @length:  length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. Caller then has to wait for current commit
 * and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
                                struct page *page,
                                unsigned int offset,
                                unsigned int length)
{
        struct buffer_head *head, *bh, *next;
        unsigned int stop = offset + length;
        unsigned int curr_off = 0;
        int partial_page = (offset || length < PAGE_SIZE);
        int may_free = 1;
        int ret = 0;

        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
                return 0;

        BUG_ON(stop > PAGE_SIZE || stop < length);

        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */

        head = bh = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (next_off > stop)
                        return 0;

                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
                        ret = journal_unmap_buffer(journal, bh, partial_page);
                        unlock_buffer(bh);
                        if (ret < 0)
                                return ret;
                        may_free &= ret;
                }
                curr_off = next_off;
                bh = next;

        } while (bh != head);

        if (!partial_page) {
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
        return 0;
}
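/*
 * Illustrative sketch of the retry protocol described in the kernel-doc
 * above (hypothetical caller; the real callers read the committing
 * transaction's tid under j_state_lock before retrying):
 *
 *      ret = jbd2_journal_invalidatepage(journal, page, offset, length);
 *      if (ret == -EBUSY) {
 *              // page straddles i_size and its buffers are committing:
 *              // wait for that commit, then lock the page and retry
 *              jbd2_log_wait_commit(journal, tid);
 *      }
 */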
/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        struct journal_head **list = NULL;
        int was_dirty = 0;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_transaction == NULL);

        if (jh->b_transaction && jh->b_jlist == jlist)
                return;

        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
                /*
                 * For metadata buffers, we track dirty bit in buffer_jbddirty
                 * instead of buffer_dirty. We should not see a dirty bit set
                 * here because we clear it in do_get_write_access but e.g.
                 * tune2fs can modify the sb and set the dirty bit at any time
                 * so we try to gracefully handle that.
                 */
                if (buffer_dirty(bh))
                        warn_dirty_buffer(bh);
                if (test_clear_buffer_dirty(bh) ||
                    test_clear_buffer_jbddirty(bh))
                        was_dirty = 1;
        }

        if (jh->b_transaction)
                __jbd2_journal_temp_unlink_buffer(jh);
        else
                jbd2_journal_grab_journal_head(bh);
        jh->b_transaction = transaction;

        switch (jlist) {
        case BJ_None:
                J_ASSERT_JH(jh, !jh->b_committed_data);
                J_ASSERT_JH(jh, !jh->b_frozen_data);
                return;
        case BJ_Metadata:
                transaction->t_nr_buffers++;
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        }

        __blist_add_buffer(list, jh);
        jh->b_jlist = jlist;

        if (was_dirty)
                set_buffer_jbddirty(bh);
}
void jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&transaction->t_journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}
/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jbd_lock_bh_state(jh2bh(jh))
 *
 * jh and bh may be already free when this function returns
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
        int was_dirty, jlist;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        if (jh->b_transaction)
                assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

        /* If the buffer is now unused, just drop it. */
        if (jh->b_next_transaction == NULL) {
                __jbd2_journal_unfile_buffer(jh);
                return;
        }

        /*
         * It has been modified by a later transaction: add it to the new
         * transaction's metadata list.
         */

        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
        /*
         * We set b_transaction here because b_next_transaction will inherit
         * our jh reference and thus __jbd2_journal_file_buffer() must not
         * take a new one.
         */
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        if (buffer_freed(bh))
                jlist = BJ_Forget;
        else if (jh->b_modified)
                jlist = BJ_Metadata;
        else
                jlist = BJ_Reserved;
        __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

        if (was_dirty)
                set_buffer_jbddirty(bh);
}
/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        /* Get reference so that buffer cannot be freed before we unlock it */
        get_bh(bh);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
}
/*
 * File inode in the inode list of the handle's transaction
 */
static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
                                   unsigned long flags)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;

        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;

        jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
                        transaction->t_tid);

        /*
         * First check whether inode isn't already on the transaction's
         * lists without taking the lock. Note that this check is safe
         * without the lock as we cannot race with somebody removing inode
         * from the transaction. The reason is that we remove inode from the
         * transaction only in journal_release_jbd_inode() and when we commit
         * the transaction. We are guarded from the first case by holding
         * a reference to the inode. We are safe against the second case
         * because if jinode->i_transaction == transaction, commit code
         * cannot touch the transaction because we hold reference to it,
         * and if jinode->i_next_transaction == transaction, commit code
         * will only file the inode where we want it.
         */
        if ((jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction) &&
            (jinode->i_flags & flags) == flags)
                return 0;

        spin_lock(&journal->j_list_lock);
        jinode->i_flags |= flags;
        /* Is inode already attached where we need it? */
        if (jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction)
                goto done;

        /*
         * We only ever set this variable to 1 so the test is safe. Since
         * t_need_data_flush is likely to be set, we do the test to save some
         * cacheline bouncing
         */
        if (!transaction->t_need_data_flush)
                transaction->t_need_data_flush = 1;
        /* On some different transaction's list - should be
         * the committing one */
        if (jinode->i_transaction) {
                J_ASSERT(jinode->i_next_transaction == NULL);
                J_ASSERT(jinode->i_transaction ==
                                        journal->j_committing_transaction);
                jinode->i_next_transaction = transaction;
                goto done;
        }
        /* Not on any transaction list... */
        J_ASSERT(!jinode->i_next_transaction);
        jinode->i_transaction = transaction;
        list_add(&jinode->i_list, &transaction->t_inode_list);
done:
        spin_unlock(&journal->j_list_lock);

        return 0;
}
int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
{
        return jbd2_journal_file_inode(handle, jinode,
                                       JI_WRITE_DATA | JI_WAIT_DATA);
}

int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
{
        return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
}
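/*
 * Illustrative sketch (hypothetical ordered-mode write path, error
 * handling elided): after data has been copied into the page cache under
 * a handle, the inode is attached to the running transaction so that
 * commit writes the data out before the metadata:
 *
 *      handle = jbd2_journal_start(journal, nblocks);
 *      // ... allocate blocks and dirty the data pages ...
 *      err = jbd2_journal_inode_add_write(handle, jinode);
 *      err2 = jbd2_journal_stop(handle);
 */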
/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on orphan list with the new inode size. The second condition
 * avoids the race that someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to orphan list
 * happens in the same transaction as write --- we don't have to write
 * any data in such case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
                                        struct jbd2_inode *jinode,
                                        loff_t new_size)
{
        transaction_t *inode_trans, *commit_trans;
        int ret = 0;

        /* This is a quick check to avoid locking if not necessary */
        if (!jinode->i_transaction)
                goto out;
        /* Locks are here just to force reading of recent values, it is
         * enough that the transaction was not committing before we started
         * a transaction adding the inode to orphan list */
        read_lock(&journal->j_state_lock);
        commit_trans = journal->j_committing_transaction;
        read_unlock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        inode_trans = jinode->i_transaction;
        spin_unlock(&journal->j_list_lock);
        if (inode_trans == commit_trans) {
                ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
                        new_size, LLONG_MAX);
                if (ret)
                        jbd2_journal_abort(journal, ret);
        }
out:
        return ret;
}
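/*
 * Illustrative sketch of the calling convention described above
 * (hypothetical truncate path, ordered mode): the inode is first put on
 * the orphan list with the new size inside one transaction, and only
 * then are the truncated blocks actually freed:
 *
 *      // transaction T1: update i_size, add the inode to the orphan list
 *      err = jbd2_journal_begin_ordered_truncate(journal, jinode,
 *                                                inode->i_size);
 *      // transaction T2: free the truncated blocks, clear the orphan
 */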