1 /*
2 * linux/fs/jbd2/transaction.c
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6 * Copyright 1998 Red Hat corp --- All Rights Reserved
8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference.
12 * Generic filesystem transaction handling code; part of the ext2fs
13 * journaling system.
15 * This file manages transactions (compound commits managed by the
16 * journaling code) and handles (individual atomic operations by the
17 * filesystem).
20 #include <linux/time.h>
21 #include <linux/fs.h>
22 #include <linux/jbd2.h>
23 #include <linux/errno.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/hrtimer.h>
29 #include <linux/backing-dev.h>
30 #include <linux/bug.h>
31 #include <linux/module.h>
33 #include <trace/events/jbd2.h>
35 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
36 static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
38 static struct kmem_cache *transaction_cache;
39 int __init jbd2_journal_init_transaction_cache(void)
41 J_ASSERT(!transaction_cache);
42 transaction_cache = kmem_cache_create("jbd2_transaction_s",
43 sizeof(transaction_t),
44 0,
45 SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
46 NULL);
47 if (transaction_cache)
48 return 0;
49 return -ENOMEM;
52 void jbd2_journal_destroy_transaction_cache(void)
54 if (transaction_cache) {
55 kmem_cache_destroy(transaction_cache);
56 transaction_cache = NULL;
60 void jbd2_journal_free_transaction(transaction_t *transaction)
62 if (unlikely(ZERO_OR_NULL_PTR(transaction)))
63 return;
64 kmem_cache_free(transaction_cache, transaction);
68 * jbd2_get_transaction: obtain a new transaction_t object.
70 * Simply allocate and initialise a new transaction. Create it in
71 * RUNNING state and add it to the current journal (which should not
72 * have an existing running transaction: we only make a new transaction
73 * once we have started to commit the old one).
75 * Preconditions:
76 * The journal MUST be locked. We don't perform atomic mallocs on the
77 * new transaction and we can't block without protecting against other
78 * processes trying to touch the journal while it is in transition.
82 static transaction_t *
83 jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
85 transaction->t_journal = journal;
86 transaction->t_state = T_RUNNING;
87 transaction->t_start_time = ktime_get();
88 transaction->t_tid = journal->j_transaction_sequence++;
89 transaction->t_expires = jiffies + journal->j_commit_interval;
90 spin_lock_init(&transaction->t_handle_lock);
91 atomic_set(&transaction->t_updates, 0);
92 atomic_set(&transaction->t_outstanding_credits,
93 atomic_read(&journal->j_reserved_credits));
94 atomic_set(&transaction->t_handle_count, 0);
95 INIT_LIST_HEAD(&transaction->t_inode_list);
96 INIT_LIST_HEAD(&transaction->t_private_list);
98 /* Set up the commit timer for the new transaction. */
99 journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
100 add_timer(&journal->j_commit_timer);
102 J_ASSERT(journal->j_running_transaction == NULL);
103 journal->j_running_transaction = transaction;
104 transaction->t_max_wait = 0;
105 transaction->t_start = jiffies;
106 transaction->t_requested = 0;
108 return transaction;
112 * Handle management.
114 * A handle_t is an object which represents a single atomic update to a
115 * filesystem, and which tracks all of the modifications which form part
116 * of that one update.
120 * Update transaction's maximum wait time, if debugging is enabled.
122 * In order for t_max_wait to be reliable, it must be protected by a
123 * lock. But doing so will mean that start_this_handle() can not be
124 * run in parallel on SMP systems, which limits our scalability. So
125 * unless debugging is enabled, we no longer update t_max_wait, which
126 * means that maximum wait time reported by the jbd2_run_stats
127 * tracepoint will always be zero.
129 static inline void update_t_max_wait(transaction_t *transaction,
130 unsigned long ts)
132 #ifdef CONFIG_JBD2_DEBUG
133 if (jbd2_journal_enable_debug &&
134 time_after(transaction->t_start, ts)) {
135 ts = jbd2_time_diff(ts, transaction->t_start);
136 spin_lock(&transaction->t_handle_lock);
137 if (ts > transaction->t_max_wait)
138 transaction->t_max_wait = ts;
139 spin_unlock(&transaction->t_handle_lock);
141 #endif
145 * Wait until running transaction passes T_LOCKED state. Also starts the commit
146 * if needed. The function expects running transaction to exist and releases
147 * j_state_lock.
149 static void wait_transaction_locked(journal_t *journal)
150 __releases(journal->j_state_lock)
152 DEFINE_WAIT(wait);
153 int need_to_start;
154 tid_t tid = journal->j_running_transaction->t_tid;
156 prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
157 TASK_UNINTERRUPTIBLE);
158 need_to_start = !tid_geq(journal->j_commit_request, tid);
159 read_unlock(&journal->j_state_lock);
160 if (need_to_start)
161 jbd2_log_start_commit(journal, tid);
162 schedule();
163 finish_wait(&journal->j_wait_transaction_locked, &wait);
166 static void sub_reserved_credits(journal_t *journal, int blocks)
168 atomic_sub(blocks, &journal->j_reserved_credits);
169 wake_up(&journal->j_wait_reserved);
173 * Wait until we can add credits for handle to the running transaction. Called
174 * with j_state_lock held for reading. Returns 0 if handle joined the running
175 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
176 * caller must retry.
178 static int add_transaction_credits(journal_t *journal, int blocks,
179 int rsv_blocks)
181 transaction_t *t = journal->j_running_transaction;
182 int needed;
183 int total = blocks + rsv_blocks;
186 * If the current transaction is locked down for commit, wait
187 * for the lock to be released.
189 if (t->t_state == T_LOCKED) {
190 wait_transaction_locked(journal);
191 return 1;
195 * If there is not enough space left in the log to write all
196 * potential buffers requested by this operation, we need to
197 * stall pending a log checkpoint to free some more log space.
199 needed = atomic_add_return(total, &t->t_outstanding_credits);
200 if (needed > journal->j_max_transaction_buffers) {
202 * If the current transaction is already too large,
203 * then start to commit it: we can then go back and
204 * attach this handle to a new transaction.
206 atomic_sub(total, &t->t_outstanding_credits);
207 wait_transaction_locked(journal);
208 return 1;
212 * The commit code assumes that it can get enough log space
213 * without forcing a checkpoint. This is *critical* for
214 * correctness: a checkpoint of a buffer which is also
215 * associated with a committing transaction creates a deadlock,
216 * so commit simply cannot force through checkpoints.
218 * We must therefore ensure the necessary space in the journal
219 * *before* starting to dirty potentially checkpointed buffers
220 * in the new transaction.
222 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
223 atomic_sub(total, &t->t_outstanding_credits);
224 read_unlock(&journal->j_state_lock);
225 write_lock(&journal->j_state_lock);
226 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
227 __jbd2_log_wait_for_space(journal);
228 write_unlock(&journal->j_state_lock);
229 return 1;
232 /* No reservation? We are done... */
233 if (!rsv_blocks)
234 return 0;
236 needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
237 /* We allow at most half of a transaction to be reserved */
238 if (needed > journal->j_max_transaction_buffers / 2) {
239 sub_reserved_credits(journal, rsv_blocks);
240 atomic_sub(total, &t->t_outstanding_credits);
241 read_unlock(&journal->j_state_lock);
242 wait_event(journal->j_wait_reserved,
243 atomic_read(&journal->j_reserved_credits) + rsv_blocks
244 <= journal->j_max_transaction_buffers / 2);
245 return 1;
247 return 0;
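/*
 * Editorial worked example of the limits enforced above (not part of the
 * original file): with j_max_transaction_buffers == 1024, the running
 * transaction may carry at most 1024 outstanding credits, the journal-wide
 * pool of reserved credits may never exceed 1024 / 2 == 512, and (see
 * start_this_handle() below) a single handle asking for more than 512
 * credits is rejected with -ENOSPC before it ever gets here.
 */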
251 * start_this_handle: Given a handle, deal with any locking or stalling
252 * needed to make sure that there is enough journal space for the handle
253 * to begin. Attach the handle to a transaction and set up the
254 * transaction's buffer credits.
257 static int start_this_handle(journal_t *journal, handle_t *handle,
258 gfp_t gfp_mask)
260 transaction_t *transaction, *new_transaction = NULL;
261 int blocks = handle->h_buffer_credits;
262 int rsv_blocks = 0;
263 unsigned long ts = jiffies;
266 * 1/2 of a transaction can be reserved, so we can practically handle
267 * only 1/2 of the maximum transaction size per operation
269 if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
270 printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
271 current->comm, blocks,
272 journal->j_max_transaction_buffers / 2);
273 return -ENOSPC;
276 if (handle->h_rsv_handle)
277 rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
279 alloc_transaction:
280 if (!journal->j_running_transaction) {
281 new_transaction = kmem_cache_zalloc(transaction_cache,
282 gfp_mask);
283 if (!new_transaction) {
285 * If __GFP_FS is not present, then we may be
286 * being called from inside the fs writeback
287 * layer, so we MUST NOT fail. Since
288 * __GFP_NOFAIL is going away, we will arrange
289 * to retry the allocation ourselves.
291 if ((gfp_mask & __GFP_FS) == 0) {
292 congestion_wait(BLK_RW_ASYNC, HZ/50);
293 goto alloc_transaction;
295 return -ENOMEM;
299 jbd_debug(3, "New handle %p going live.\n", handle);
302 * We need to hold j_state_lock until t_updates has been incremented,
303 * for proper journal barrier handling
305 repeat:
306 read_lock(&journal->j_state_lock);
307 BUG_ON(journal->j_flags & JBD2_UNMOUNT);
308 if (is_journal_aborted(journal) ||
309 (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
310 read_unlock(&journal->j_state_lock);
311 jbd2_journal_free_transaction(new_transaction);
312 return -EROFS;
316 * Wait on the journal's transaction barrier if necessary. Specifically
317 * we allow reserved handles to proceed because otherwise commit could
318 * deadlock on page writeback not being able to complete.
320 if (!handle->h_reserved && journal->j_barrier_count) {
321 read_unlock(&journal->j_state_lock);
322 wait_event(journal->j_wait_transaction_locked,
323 journal->j_barrier_count == 0);
324 goto repeat;
327 if (!journal->j_running_transaction) {
328 read_unlock(&journal->j_state_lock);
329 if (!new_transaction)
330 goto alloc_transaction;
331 write_lock(&journal->j_state_lock);
332 if (!journal->j_running_transaction &&
333 (handle->h_reserved || !journal->j_barrier_count)) {
334 jbd2_get_transaction(journal, new_transaction);
335 new_transaction = NULL;
337 write_unlock(&journal->j_state_lock);
338 goto repeat;
341 transaction = journal->j_running_transaction;
343 if (!handle->h_reserved) {
344 /* We may have dropped j_state_lock - restart in that case */
345 if (add_transaction_credits(journal, blocks, rsv_blocks))
346 goto repeat;
347 } else {
349 * We have a reserved handle so we are allowed to join a T_LOCKED
350 * transaction and we don't have to check for transaction size
351 * and journal space.
353 sub_reserved_credits(journal, blocks);
354 handle->h_reserved = 0;
357 /* OK, account for the buffers that this operation expects to
358 * use and add the handle to the running transaction.
360 update_t_max_wait(transaction, ts);
361 handle->h_transaction = transaction;
362 handle->h_requested_credits = blocks;
363 handle->h_start_jiffies = jiffies;
364 atomic_inc(&transaction->t_updates);
365 atomic_inc(&transaction->t_handle_count);
366 jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
367 handle, blocks,
368 atomic_read(&transaction->t_outstanding_credits),
369 jbd2_log_space_left(journal));
370 read_unlock(&journal->j_state_lock);
371 current->journal_info = handle;
373 lock_map_acquire(&handle->h_lockdep_map);
374 jbd2_journal_free_transaction(new_transaction);
375 return 0;
378 static struct lock_class_key jbd2_handle_key;
380 /* Allocate a new handle. This should probably be in a slab... */
381 static handle_t *new_handle(int nblocks)
383 handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
384 if (!handle)
385 return NULL;
386 handle->h_buffer_credits = nblocks;
387 handle->h_ref = 1;
389 lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
390 &jbd2_handle_key, 0);
392 return handle;
396 * handle_t *jbd2_journal_start() - Obtain a new handle.
397 * @journal: Journal to start transaction on.
398 * @nblocks: number of block buffers we might modify
400 * We make sure that the transaction can guarantee at least nblocks of
401 * modified buffers in the log. We block until the log can guarantee
402 * that much space. Additionally, if rsv_blocks > 0, we also create another
403 * handle with rsv_blocks reserved blocks in the journal. This handle is
404 * is stored in h_rsv_handle. It is not attached to any particular transaction
405 * and thus doesn't block transaction commit. If the caller uses this reserved
406 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
407 * on the parent handle will dispose of the reserved one. A reserved handle has to
408 * be converted to a normal handle using jbd2_journal_start_reserved() before
409 * it can be used.
411 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
412 * on failure.
414 handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
415 gfp_t gfp_mask, unsigned int type,
416 unsigned int line_no)
418 handle_t *handle = journal_current_handle();
419 int err;
421 if (!journal)
422 return ERR_PTR(-EROFS);
424 if (handle) {
425 J_ASSERT(handle->h_transaction->t_journal == journal);
426 handle->h_ref++;
427 return handle;
430 handle = new_handle(nblocks);
431 if (!handle)
432 return ERR_PTR(-ENOMEM);
433 if (rsv_blocks) {
434 handle_t *rsv_handle;
436 rsv_handle = new_handle(rsv_blocks);
437 if (!rsv_handle) {
438 jbd2_free_handle(handle);
439 return ERR_PTR(-ENOMEM);
441 rsv_handle->h_reserved = 1;
442 rsv_handle->h_journal = journal;
443 handle->h_rsv_handle = rsv_handle;
446 err = start_this_handle(journal, handle, gfp_mask);
447 if (err < 0) {
448 if (handle->h_rsv_handle)
449 jbd2_free_handle(handle->h_rsv_handle);
450 jbd2_free_handle(handle);
451 return ERR_PTR(err);
453 handle->h_type = type;
454 handle->h_line_no = line_no;
455 trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
456 handle->h_transaction->t_tid, type,
457 line_no, nblocks);
458 return handle;
460 EXPORT_SYMBOL(jbd2__journal_start);
463 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
465 return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
467 EXPORT_SYMBOL(jbd2_journal_start);
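/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the usual caller pattern for jbd2_journal_start()/jbd2_journal_stop().
 * The journal pointer and the credit count are placeholders for whatever
 * the calling filesystem really uses.
 *
 *	handle_t *handle = jbd2_journal_start(journal, 2);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... modify at most 2 metadata buffers under this handle ...
 *	err = jbd2_journal_stop(handle);
 *
 * If the task already holds a handle, jbd2_journal_start() just takes an
 * extra reference on it (h_ref), so start/stop pairs may nest within one
 * task as long as they are balanced.
 */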
469 void jbd2_journal_free_reserved(handle_t *handle)
471 journal_t *journal = handle->h_journal;
473 WARN_ON(!handle->h_reserved);
474 sub_reserved_credits(journal, handle->h_buffer_credits);
475 jbd2_free_handle(handle);
477 EXPORT_SYMBOL(jbd2_journal_free_reserved);
480 * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
481 * @handle: handle to start
483 * Start handle that has been previously reserved with jbd2_journal_reserve().
484 * This attaches @handle to the running transaction (or creates one if there's
485 * no transaction running). Unlike jbd2_journal_start() this function cannot
486 * block on journal commit, checkpointing, or similar stuff. It can block on
487 * memory allocation or frozen journal though.
489 * Return 0 on success, non-zero on error - handle is freed in that case.
491 int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
492 unsigned int line_no)
494 journal_t *journal = handle->h_journal;
495 int ret = -EIO;
497 if (WARN_ON(!handle->h_reserved)) {
498 /* Someone passed in normal handle? Just stop it. */
499 jbd2_journal_stop(handle);
500 return ret;
503 * Usefulness of mixing reserved and unreserved handles is
504 * questionable. So far nobody seems to need it so just error out.
506 if (WARN_ON(current->journal_info)) {
507 jbd2_journal_free_reserved(handle);
508 return ret;
511 handle->h_journal = NULL;
513 * GFP_NOFS is here because callers are likely from writeback or
514 * similarly constrained call sites
516 ret = start_this_handle(journal, handle, GFP_NOFS);
517 if (ret < 0) {
518 jbd2_journal_free_reserved(handle);
519 return ret;
521 handle->h_type = type;
522 handle->h_line_no = line_no;
523 return 0;
525 EXPORT_SYMBOL(jbd2_journal_start_reserved);
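/*
 * Illustrative sketch (editorial addition): the lifecycle of a reserved
 * handle.  A caller that will later run in a context which must not block
 * on transaction commit (writeback, for instance) can pre-reserve credits
 * and activate them later; all names below are placeholders.
 *
 *	handle = jbd2__journal_start(journal, nblocks, rsv_blocks,
 *				     GFP_NOFS, type, line_no);
 *	rsv = handle->h_rsv_handle;
 *	handle->h_rsv_handle = NULL;	(take ownership, see above)
 *	jbd2_journal_stop(handle);
 *	...
 *	err = jbd2_journal_start_reserved(rsv, type, line_no);
 *	if (err)
 *		return err;		(rsv has already been freed)
 *	... use rsv like any other handle ...
 *	jbd2_journal_stop(rsv);
 *
 * A reservation that ends up unused is dropped with
 * jbd2_journal_free_reserved(rsv) instead.
 */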
528 * int jbd2_journal_extend() - extend buffer credits.
529 * @handle: handle to 'extend'
530 * @nblocks: nr blocks to try to extend by.
532 * Some transactions, such as large extends and truncates, can be done
533 * atomically all at once or in several stages. The operation requests
534 * a credit for a number of buffer modifications in advance, but can
535 * extend its credit if it needs more.
537 * jbd2_journal_extend tries to give the running handle more buffer credits.
538 * It does not guarantee the allocation - this is best-effort only.
539 * The calling process MUST be able to deal cleanly with a failure to
540 * extend here.
542 * Return 0 on success, non-zero on failure.
544 * return code < 0 implies an error
545 * return code > 0 implies normal transaction-full status.
547 int jbd2_journal_extend(handle_t *handle, int nblocks)
549 transaction_t *transaction = handle->h_transaction;
550 journal_t *journal;
551 int result;
552 int wanted;
554 WARN_ON(!transaction);
555 if (is_handle_aborted(handle))
556 return -EROFS;
557 journal = transaction->t_journal;
559 result = 1;
561 read_lock(&journal->j_state_lock);
563 /* Don't extend a locked-down transaction! */
564 if (transaction->t_state != T_RUNNING) {
565 jbd_debug(3, "denied handle %p %d blocks: "
566 "transaction not running\n", handle, nblocks);
567 goto error_out;
570 spin_lock(&transaction->t_handle_lock);
571 wanted = atomic_add_return(nblocks,
572 &transaction->t_outstanding_credits);
574 if (wanted > journal->j_max_transaction_buffers) {
575 jbd_debug(3, "denied handle %p %d blocks: "
576 "transaction too large\n", handle, nblocks);
577 atomic_sub(nblocks, &transaction->t_outstanding_credits);
578 goto unlock;
581 if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
582 jbd2_log_space_left(journal)) {
583 jbd_debug(3, "denied handle %p %d blocks: "
584 "insufficient log space\n", handle, nblocks);
585 atomic_sub(nblocks, &transaction->t_outstanding_credits);
586 goto unlock;
589 trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
590 transaction->t_tid,
591 handle->h_type, handle->h_line_no,
592 handle->h_buffer_credits,
593 nblocks);
595 handle->h_buffer_credits += nblocks;
596 handle->h_requested_credits += nblocks;
597 result = 0;
599 jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
600 unlock:
601 spin_unlock(&transaction->t_handle_lock);
602 error_out:
603 read_unlock(&journal->j_state_lock);
604 return result;
609 * int jbd2_journal_restart() - restart a handle.
610 * @handle: handle to restart
611 * @nblocks: nr credits requested
613 * Restart a handle for a multi-transaction filesystem
614 * operation.
616 * If the jbd2_journal_extend() call above fails to grant new buffer credits
617 * to a running handle, a call to jbd2_journal_restart will commit the
618 * handle's transaction so far and reattach the handle to a new
619 * transaction capable of guaranteeing the requested number of
620 * credits. We preserve the reserved handle if one is attached to the
621 * passed-in handle.
623 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
625 transaction_t *transaction = handle->h_transaction;
626 journal_t *journal;
627 tid_t tid;
628 int need_to_start, ret;
630 WARN_ON(!transaction);
631 /* If we've had an abort of any type, don't even think about
632 * actually doing the restart! */
633 if (is_handle_aborted(handle))
634 return 0;
635 journal = transaction->t_journal;
638 * First unlink the handle from its current transaction, and start the
639 * commit on that.
641 J_ASSERT(atomic_read(&transaction->t_updates) > 0);
642 J_ASSERT(journal_current_handle() == handle);
644 read_lock(&journal->j_state_lock);
645 spin_lock(&transaction->t_handle_lock);
646 atomic_sub(handle->h_buffer_credits,
647 &transaction->t_outstanding_credits);
648 if (handle->h_rsv_handle) {
649 sub_reserved_credits(journal,
650 handle->h_rsv_handle->h_buffer_credits);
652 if (atomic_dec_and_test(&transaction->t_updates))
653 wake_up(&journal->j_wait_updates);
654 tid = transaction->t_tid;
655 spin_unlock(&transaction->t_handle_lock);
656 handle->h_transaction = NULL;
657 current->journal_info = NULL;
659 jbd_debug(2, "restarting handle %p\n", handle);
660 need_to_start = !tid_geq(journal->j_commit_request, tid);
661 read_unlock(&journal->j_state_lock);
662 if (need_to_start)
663 jbd2_log_start_commit(journal, tid);
665 lock_map_release(&handle->h_lockdep_map);
666 handle->h_buffer_credits = nblocks;
667 ret = start_this_handle(journal, handle, gfp_mask);
668 return ret;
670 EXPORT_SYMBOL(jbd2__journal_restart);
673 int jbd2_journal_restart(handle_t *handle, int nblocks)
675 return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
677 EXPORT_SYMBOL(jbd2_journal_restart);
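/*
 * Illustrative sketch (editorial addition): the extend-or-restart pattern.
 * A caller that discovers mid-operation that it needs more credits first
 * tries to extend the handle; if the running transaction is already too
 * full (positive return value), it restarts the handle on a fresh
 * transaction instead:
 *
 *	err = jbd2_journal_extend(handle, nblocks);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, nblocks);
 *	if (err)
 *		goto fail;
 *
 * Because jbd2_journal_restart() commits the work done under the handle so
 * far, the caller's metadata must be in a consistent state at that point.
 */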
680 * void jbd2_journal_lock_updates () - establish a transaction barrier.
681 * @journal: Journal to establish a barrier on.
683 * This locks out any further updates from being started, and blocks
684 * until all existing updates have completed, returning only once the
685 * journal is in a quiescent state with no updates running.
687 * The journal lock should not be held on entry.
689 void jbd2_journal_lock_updates(journal_t *journal)
691 DEFINE_WAIT(wait);
693 write_lock(&journal->j_state_lock);
694 ++journal->j_barrier_count;
696 /* Wait until there are no reserved handles */
697 if (atomic_read(&journal->j_reserved_credits)) {
698 write_unlock(&journal->j_state_lock);
699 wait_event(journal->j_wait_reserved,
700 atomic_read(&journal->j_reserved_credits) == 0);
701 write_lock(&journal->j_state_lock);
704 /* Wait until there are no running updates */
705 while (1) {
706 transaction_t *transaction = journal->j_running_transaction;
708 if (!transaction)
709 break;
711 spin_lock(&transaction->t_handle_lock);
712 prepare_to_wait(&journal->j_wait_updates, &wait,
713 TASK_UNINTERRUPTIBLE);
714 if (!atomic_read(&transaction->t_updates)) {
715 spin_unlock(&transaction->t_handle_lock);
716 finish_wait(&journal->j_wait_updates, &wait);
717 break;
719 spin_unlock(&transaction->t_handle_lock);
720 write_unlock(&journal->j_state_lock);
721 schedule();
722 finish_wait(&journal->j_wait_updates, &wait);
723 write_lock(&journal->j_state_lock);
725 write_unlock(&journal->j_state_lock);
728 * We have now established a barrier against other normal updates, but
729 * we also need to barrier against other jbd2_journal_lock_updates() calls
730 * to make sure that we serialise special journal-locked operations
731 * too.
733 mutex_lock(&journal->j_barrier);
737 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
738 * @journal: Journal to release the barrier on.
740 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
742 * Should be called without the journal lock held.
744 void jbd2_journal_unlock_updates (journal_t *journal)
746 J_ASSERT(journal->j_barrier_count != 0);
748 mutex_unlock(&journal->j_barrier);
749 write_lock(&journal->j_state_lock);
750 --journal->j_barrier_count;
751 write_unlock(&journal->j_state_lock);
752 wake_up(&journal->j_wait_transaction_locked);
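/*
 * Illustrative sketch (editorial addition): the barrier above is what a
 * filesystem freeze or similar "quiesce everything" operation would wrap
 * around its critical section:
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no new handles can start, no updates are running ...
 *	jbd2_journal_unlock_updates(journal);
 *
 * The pair must not be used while the calling task itself holds a handle
 * on this journal, since lock_updates() waits for t_updates to drop to
 * zero and would deadlock on the caller's own update.
 */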
755 static void warn_dirty_buffer(struct buffer_head *bh)
757 char b[BDEVNAME_SIZE];
759 printk(KERN_WARNING
760 "JBD2: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
761 "There's a risk of filesystem corruption in case of system "
762 "crash.\n",
763 bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
766 static int sleep_on_shadow_bh(void *word)
768 io_schedule();
769 return 0;
773 * If the buffer is already part of the current transaction, then there
774 * is nothing we need to do. If it is already part of a prior
775 * transaction which we are still committing to disk, then we need to
776 * make sure that we do not overwrite the old copy: we do copy-out to
777 * preserve the copy going to disk. We also account the buffer against
778 * the handle's metadata buffer credits (unless the buffer is already
779 * part of the transaction, that is).
782 static int
783 do_get_write_access(handle_t *handle, struct journal_head *jh,
784 int force_copy)
786 struct buffer_head *bh;
787 transaction_t *transaction = handle->h_transaction;
788 journal_t *journal;
789 int error;
790 char *frozen_buffer = NULL;
791 int need_copy = 0;
792 unsigned long start_lock, time_lock;
794 WARN_ON(!transaction);
795 if (is_handle_aborted(handle))
796 return -EROFS;
797 journal = transaction->t_journal;
799 jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
801 JBUFFER_TRACE(jh, "entry");
802 repeat:
803 bh = jh2bh(jh);
805 /* @@@ Need to check for errors here at some point. */
807 start_lock = jiffies;
808 lock_buffer(bh);
809 jbd_lock_bh_state(bh);
811 /* If it takes too long to lock the buffer, trace it */
812 time_lock = jbd2_time_diff(start_lock, jiffies);
813 if (time_lock > HZ/10)
814 trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
815 jiffies_to_msecs(time_lock));
817 /* We now hold the buffer lock so it is safe to query the buffer
818 * state. Is the buffer dirty?
820 * If so, there are two possibilities. The buffer may be
821 * non-journaled, and undergoing a quite legitimate writeback.
822 * Otherwise, it is journaled, and we don't expect dirty buffers
823 * in that state (the buffers should be marked JBD_Dirty
824 * instead.) So either the IO is being done under our own
825 * control and this is a bug, or it's a third party IO such as
826 * dump(8) (which may leave the buffer scheduled for read ---
827 * ie. locked but not dirty) or tune2fs (which may actually have
828 * the buffer dirtied, ugh.) */
830 if (buffer_dirty(bh)) {
832 * First question: is this buffer already part of the current
833 * transaction or the existing committing transaction?
835 if (jh->b_transaction) {
836 J_ASSERT_JH(jh,
837 jh->b_transaction == transaction ||
838 jh->b_transaction ==
839 journal->j_committing_transaction);
840 if (jh->b_next_transaction)
841 J_ASSERT_JH(jh, jh->b_next_transaction ==
842 transaction);
843 warn_dirty_buffer(bh);
846 * In any case we need to clean the dirty flag and we must
847 * do it under the buffer lock to be sure we don't race
848 * with running write-out.
850 JBUFFER_TRACE(jh, "Journalling dirty buffer");
851 clear_buffer_dirty(bh);
852 set_buffer_jbddirty(bh);
855 unlock_buffer(bh);
857 error = -EROFS;
858 if (is_handle_aborted(handle)) {
859 jbd_unlock_bh_state(bh);
860 goto out;
862 error = 0;
865 * The buffer is already part of this transaction if b_transaction or
866 * b_next_transaction points to it
868 if (jh->b_transaction == transaction ||
869 jh->b_next_transaction == transaction)
870 goto done;
873 * this is the first time this transaction is touching this buffer,
874 * reset the modified flag
876 jh->b_modified = 0;
879 * If there is already a copy-out version of this buffer, then we don't
880 * need to make another one
882 if (jh->b_frozen_data) {
883 JBUFFER_TRACE(jh, "has frozen data");
884 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
885 jh->b_next_transaction = transaction;
886 goto done;
889 /* Is there data here we need to preserve? */
891 if (jh->b_transaction && jh->b_transaction != transaction) {
892 JBUFFER_TRACE(jh, "owned by older transaction");
893 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
894 J_ASSERT_JH(jh, jh->b_transaction ==
895 journal->j_committing_transaction);
897 /* There is one case we have to be very careful about.
898 * If the committing transaction is currently writing
899 * this buffer out to disk and has NOT made a copy-out,
900 * then we cannot modify the buffer contents at all
901 * right now. The essence of copy-out is that it is the
902 * extra copy, not the primary copy, which gets
903 * journaled. If the primary copy is already going to
904 * disk then we cannot do copy-out here. */
906 if (buffer_shadow(bh)) {
907 JBUFFER_TRACE(jh, "on shadow: sleep");
908 jbd_unlock_bh_state(bh);
909 wait_on_bit(&bh->b_state, BH_Shadow,
910 sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
911 goto repeat;
915 * Only do the copy if the currently-owning transaction still
916 * needs it. If buffer isn't on BJ_Metadata list, the
917 * committing transaction is past that stage (here we use the
918 * fact that BH_Shadow is set under bh_state lock together with
919 * refiling to BJ_Shadow list and at this point we know the
920 * buffer doesn't have BH_Shadow set).
922 * Subtle point, though: if this is a get_undo_access,
923 * then we will be relying on the frozen_data to contain
924 * the new value of the committed_data record after the
925 * transaction, so we HAVE to force the frozen_data copy
926 * in that case.
928 if (jh->b_jlist == BJ_Metadata || force_copy) {
929 JBUFFER_TRACE(jh, "generate frozen data");
930 if (!frozen_buffer) {
931 JBUFFER_TRACE(jh, "allocate memory for buffer");
932 jbd_unlock_bh_state(bh);
933 frozen_buffer =
934 jbd2_alloc(jh2bh(jh)->b_size,
935 GFP_NOFS);
936 if (!frozen_buffer) {
937 printk(KERN_ERR
938 "%s: OOM for frozen_buffer\n",
939 __func__);
940 JBUFFER_TRACE(jh, "oom!");
941 error = -ENOMEM;
942 jbd_lock_bh_state(bh);
943 goto done;
945 goto repeat;
947 jh->b_frozen_data = frozen_buffer;
948 frozen_buffer = NULL;
949 need_copy = 1;
951 jh->b_next_transaction = transaction;
956 * Finally, if the buffer is not journaled right now, we need to make
957 * sure it doesn't get written to disk before the caller actually
958 * commits the new data
960 if (!jh->b_transaction) {
961 JBUFFER_TRACE(jh, "no transaction");
962 J_ASSERT_JH(jh, !jh->b_next_transaction);
963 JBUFFER_TRACE(jh, "file as BJ_Reserved");
964 spin_lock(&journal->j_list_lock);
965 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
966 spin_unlock(&journal->j_list_lock);
969 done:
970 if (need_copy) {
971 struct page *page;
972 int offset;
973 char *source;
975 J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
976 "Possible IO failure.\n");
977 page = jh2bh(jh)->b_page;
978 offset = offset_in_page(jh2bh(jh)->b_data);
979 source = kmap_atomic(page);
980 /* Fire data frozen trigger just before we copy the data */
981 jbd2_buffer_frozen_trigger(jh, source + offset,
982 jh->b_triggers);
983 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
984 kunmap_atomic(source);
987 * Now that the frozen data is saved off, we need to store
988 * any matching triggers.
990 jh->b_frozen_triggers = jh->b_triggers;
992 jbd_unlock_bh_state(bh);
995 * If we are about to journal a buffer, then any revoke pending on it is
996 * no longer valid
998 jbd2_journal_cancel_revoke(handle, jh);
1000 out:
1001 if (unlikely(frozen_buffer)) /* It's usually NULL */
1002 jbd2_free(frozen_buffer, bh->b_size);
1004 JBUFFER_TRACE(jh, "exit");
1005 return error;
1009 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
1010 * @handle: transaction to add buffer modifications to
1011 * @bh: bh to be used for metadata writes
1013 * Returns an error code or 0 on success.
1015 * In full data journalling mode the buffer may be of type BJ_AsyncData,
1016 * because we're write()ing a buffer which is also part of a shared mapping.
1019 int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
1021 struct journal_head *jh = jbd2_journal_add_journal_head(bh);
1022 int rc;
1024 /* We do not want to get caught playing with fields which the
1025 * log thread also manipulates. Make sure that the buffer
1026 * completes any outstanding IO before proceeding. */
1027 rc = do_get_write_access(handle, jh, 0);
1028 jbd2_journal_put_journal_head(jh);
1029 return rc;
1034 * When the user wants to journal a newly created buffer_head
1035 * (ie. getblk() returned a new buffer and we are going to populate it
1036 * manually rather than reading off disk), then we need to keep the
1037 * buffer_head locked until it has been completely filled with new
1038 * data. In this case, we should be able to make the assertion that
1039 * the bh is not already part of an existing transaction.
1041 * The buffer should already be locked by the caller by this point.
1042 * There is no lock ranking violation: it was a newly created,
1043 * unlocked buffer beforehand. */
1046 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
1047 * @handle: transaction to add the new buffer to
1048 * @bh: new buffer.
1050 * Call this if you create a new bh.
1052 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
1054 transaction_t *transaction = handle->h_transaction;
1055 journal_t *journal;
1056 struct journal_head *jh = jbd2_journal_add_journal_head(bh);
1057 int err;
1059 jbd_debug(5, "journal_head %p\n", jh);
1060 WARN_ON(!transaction);
1061 err = -EROFS;
1062 if (is_handle_aborted(handle))
1063 goto out;
1064 journal = transaction->t_journal;
1065 err = 0;
1067 JBUFFER_TRACE(jh, "entry");
1069 * The buffer may already belong to this transaction due to pre-zeroing
1070 * in the filesystem's new_block code. It may also be on the previous,
1071 * committing transaction's lists, but it HAS to be in Forget state in
1072 * that case: the transaction must have deleted the buffer for it to be
1073 * reused here.
1075 jbd_lock_bh_state(bh);
1076 J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
1077 jh->b_transaction == NULL ||
1078 (jh->b_transaction == journal->j_committing_transaction &&
1079 jh->b_jlist == BJ_Forget)));
1081 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
1082 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
1084 if (jh->b_transaction == NULL) {
1086 * Previous jbd2_journal_forget() could have left the buffer
1087 * with jbddirty bit set because it was being committed. When
1088 * the commit finished, we've filed the buffer for
1089 * checkpointing and marked it dirty. Now we are reallocating
1090 * the buffer so the transaction freeing it must have
1091 * committed and so it's safe to clear the dirty bit.
1093 clear_buffer_dirty(jh2bh(jh));
1094 /* first access by this transaction */
1095 jh->b_modified = 0;
1097 JBUFFER_TRACE(jh, "file as BJ_Reserved");
1098 spin_lock(&journal->j_list_lock);
1099 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
1100 } else if (jh->b_transaction == journal->j_committing_transaction) {
1101 /* first access by this transaction */
1102 jh->b_modified = 0;
1104 JBUFFER_TRACE(jh, "set next transaction");
1105 spin_lock(&journal->j_list_lock);
1106 jh->b_next_transaction = transaction;
1108 spin_unlock(&journal->j_list_lock);
1109 jbd_unlock_bh_state(bh);
1112 * akpm: I added this. ext3_alloc_branch can pick up new indirect
1113 * blocks which contain freed but then revoked metadata. We need
1114 * to cancel the revoke in case we end up freeing it yet again
1115 * and then reallocating it as data - this would cause a second revoke,
1116 * which hits an assertion error.
1118 JBUFFER_TRACE(jh, "cancelling revoke");
1119 jbd2_journal_cancel_revoke(handle, jh);
1120 out:
1121 jbd2_journal_put_journal_head(jh);
1122 return err;
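/*
 * Illustrative sketch (editorial addition): journalling a freshly allocated
 * metadata block.  sb_getblk() and the block number are placeholders for
 * however the filesystem obtains the new buffer.
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	if (!err) {
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *
 * Note that the buffer stays locked by the caller across the
 * get_create_access() call, as required above.
 */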
1126 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
1127 * non-rewindable consequences
1128 * @handle: transaction
1129 * @bh: buffer to undo
1131 * Sometimes there is a need to distinguish between metadata which has
1132 * been committed to disk and that which has not. The ext3fs code uses
1133 * this for freeing and allocating space, we have to make sure that we
1134 * do not reuse freed space until the deallocation has been committed,
1135 * since if we overwrote that space we would make the delete
1136 * un-rewindable in case of a crash.
1138 * To deal with that, jbd2_journal_get_undo_access requests write access to a
1139 * buffer for parts of non-rewindable operations such as delete
1140 * operations on the bitmaps. The journaling code must keep a copy of
1141 * the buffer's contents prior to the undo_access call until such time
1142 * as we know that the buffer has definitely been committed to disk.
1144 * We never need to know which transaction the committed data is part
1145 * of, buffers touched here are guaranteed to be dirtied later and so
1146 * will be committed to a new transaction in due course, at which point
1147 * we can discard the old committed data pointer.
1149 * Returns error number or 0 on success.
1151 int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
1153 int err;
1154 struct journal_head *jh = jbd2_journal_add_journal_head(bh);
1155 char *committed_data = NULL;
1157 JBUFFER_TRACE(jh, "entry");
1160 * Do this first --- it can drop the journal lock, so we want to
1161 * make sure that obtaining the committed_data is done
1162 * atomically wrt. completion of any outstanding commits.
1164 err = do_get_write_access(handle, jh, 1);
1165 if (err)
1166 goto out;
1168 repeat:
1169 if (!jh->b_committed_data) {
1170 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
1171 if (!committed_data) {
1172 printk(KERN_ERR "%s: No memory for committed data\n",
1173 __func__);
1174 err = -ENOMEM;
1175 goto out;
1179 jbd_lock_bh_state(bh);
1180 if (!jh->b_committed_data) {
1181 /* Copy out the current buffer contents into the
1182 * preserved, committed copy. */
1183 JBUFFER_TRACE(jh, "generate b_committed data");
1184 if (!committed_data) {
1185 jbd_unlock_bh_state(bh);
1186 goto repeat;
1189 jh->b_committed_data = committed_data;
1190 committed_data = NULL;
1191 memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
1193 jbd_unlock_bh_state(bh);
1194 out:
1195 jbd2_journal_put_journal_head(jh);
1196 if (unlikely(committed_data))
1197 jbd2_free(committed_data, bh->b_size);
1198 return err;
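/*
 * Illustrative sketch (editorial addition): undo access is what a block
 * bitmap style deallocation would use, so the committed copy of the bitmap
 * survives until the freeing transaction really reaches disk:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		goto out;
 *	... clear the freed bits in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *
 * jh->b_committed_data then preserves the pre-modification contents for the
 * allocator to consult until the commit completes.
 */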
1202 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
1203 * @bh: buffer to trigger on
1204 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
1206 * Set any triggers on this journal_head. This is always safe, because
1207 * triggers for a committing buffer will be saved off, and triggers for
1208 * a running transaction will match the buffer in that transaction.
1210 * Call with NULL to clear the triggers.
1212 void jbd2_journal_set_triggers(struct buffer_head *bh,
1213 struct jbd2_buffer_trigger_type *type)
1215 struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
1217 if (WARN_ON(!jh))
1218 return;
1219 jh->b_triggers = type;
1220 jbd2_journal_put_journal_head(jh);
1223 void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
1224 struct jbd2_buffer_trigger_type *triggers)
1226 struct buffer_head *bh = jh2bh(jh);
1228 if (!triggers || !triggers->t_frozen)
1229 return;
1231 triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
1234 void jbd2_buffer_abort_trigger(struct journal_head *jh,
1235 struct jbd2_buffer_trigger_type *triggers)
1237 if (!triggers || !triggers->t_abort)
1238 return;
1240 triggers->t_abort(triggers, jh2bh(jh));
1246 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
1247 * @handle: transaction to add buffer to.
1248 * @bh: buffer to mark
1250 * mark dirty metadata which needs to be journaled as part of the current
1251 * transaction.
1253 * The buffer must have previously had jbd2_journal_get_write_access()
1254 * called so that it has a valid journal_head attached to the buffer
1255 * head.
1257 * The buffer is placed on the transaction's metadata list and is marked
1258 * as belonging to the transaction.
1260 * Returns error number or 0 on success.
1262 * Special care needs to be taken if the buffer already belongs to the
1263 * current committing transaction (in which case we should have frozen
1264 * data present for that commit). In that case, we don't relink the
1265 * buffer: that only gets done when the old transaction finally
1266 * completes its commit.
1268 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1270 transaction_t *transaction = handle->h_transaction;
1271 journal_t *journal;
1272 struct journal_head *jh;
1273 int ret = 0;
1275 WARN_ON(!transaction);
1276 if (is_handle_aborted(handle))
1277 return -EROFS;
1278 journal = transaction->t_journal;
1279 jh = jbd2_journal_grab_journal_head(bh);
1280 if (!jh) {
1281 ret = -EUCLEAN;
1282 goto out;
1284 jbd_debug(5, "journal_head %p\n", jh);
1285 JBUFFER_TRACE(jh, "entry");
1287 jbd_lock_bh_state(bh);
1289 if (jh->b_modified == 0) {
1291 * This buffer has been modified and is becoming part
1292 * of the transaction. This needs to be done
1293 * once per transaction -bzzz
1295 jh->b_modified = 1;
1296 if (handle->h_buffer_credits <= 0) {
1297 ret = -ENOSPC;
1298 goto out_unlock_bh;
1300 handle->h_buffer_credits--;
1304 * fastpath, to avoid expensive locking. If this buffer is already
1305 * on the running transaction's metadata list there is nothing to do.
1306 * Nobody can take it off again because there is a handle open.
1307 * I _think_ we're OK here with SMP barriers - a mistaken decision will
1308 * result in this test being false, so we go in and take the locks.
1310 if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
1311 JBUFFER_TRACE(jh, "fastpath");
1312 if (unlikely(jh->b_transaction !=
1313 journal->j_running_transaction)) {
1314 printk(KERN_ERR "JBD2: %s: "
1315 "jh->b_transaction (%llu, %p, %u) != "
1316 "journal->j_running_transaction (%p, %u)\n",
1317 journal->j_devname,
1318 (unsigned long long) bh->b_blocknr,
1319 jh->b_transaction,
1320 jh->b_transaction ? jh->b_transaction->t_tid : 0,
1321 journal->j_running_transaction,
1322 journal->j_running_transaction ?
1323 journal->j_running_transaction->t_tid : 0);
1324 ret = -EINVAL;
1326 goto out_unlock_bh;
1329 set_buffer_jbddirty(bh);
1332 * Metadata already on the current transaction list doesn't
1333 * need to be filed. Metadata on another transaction's list must
1334 * be committing, and will be refiled once the commit completes:
1335 * leave it alone for now.
1337 if (jh->b_transaction != transaction) {
1338 JBUFFER_TRACE(jh, "already on other transaction");
1339 if (unlikely(((jh->b_transaction !=
1340 journal->j_committing_transaction)) ||
1341 (jh->b_next_transaction != transaction))) {
1342 printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
1343 "bad jh for block %llu: "
1344 "transaction (%p, %u), "
1345 "jh->b_transaction (%p, %u), "
1346 "jh->b_next_transaction (%p, %u), jlist %u\n",
1347 journal->j_devname,
1348 (unsigned long long) bh->b_blocknr,
1349 transaction, transaction->t_tid,
1350 jh->b_transaction,
1351 jh->b_transaction ?
1352 jh->b_transaction->t_tid : 0,
1353 jh->b_next_transaction,
1354 jh->b_next_transaction ?
1355 jh->b_next_transaction->t_tid : 0,
1356 jh->b_jlist);
1357 WARN_ON(1);
1358 ret = -EINVAL;
1360 /* And this case is illegal: we can't reuse another
1361 * transaction's data buffer, ever. */
1362 goto out_unlock_bh;
1365 /* That test should have eliminated the following case: */
1366 J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
1368 JBUFFER_TRACE(jh, "file as BJ_Metadata");
1369 spin_lock(&journal->j_list_lock);
1370 __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
1371 spin_unlock(&journal->j_list_lock);
1372 out_unlock_bh:
1373 jbd_unlock_bh_state(bh);
1374 jbd2_journal_put_journal_head(jh);
1375 out:
1376 JBUFFER_TRACE(jh, "exit");
1377 return ret;
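/*
 * Illustrative sketch (editorial addition): how a metadata update normally
 * combines the two calls above within one handle.  "bh" stands for a
 * buffer_head the filesystem already holds a reference on.
 *
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (err)
 *		goto out;
 *	... modify bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 *
 * The write access call must come first: it is what lets JBD2 copy out the
 * old contents if the buffer still belongs to the committing transaction.
 * jbd2_journal_dirty_metadata() then files the buffer on the transaction's
 * BJ_Metadata list and charges one credit the first time the buffer is
 * modified in this transaction.
 */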
1381 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1382 * @handle: transaction handle
1383 * @bh: bh to 'forget'
1385 * We can only do the bforget if there are no commits pending against the
1386 * buffer. If the buffer is dirty in the current running transaction we
1387 * can safely unlink it.
1389 * bh may not be a journalled buffer at all - it may be a non-JBD
1390 * buffer which came off the hashtable. Check for this.
1392 * Decrements bh->b_count by one.
1394 * Allow this call even if the handle has aborted --- it may be part of
1395 * the caller's cleanup after an abort.
1397 int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
1399 transaction_t *transaction = handle->h_transaction;
1400 journal_t *journal;
1401 struct journal_head *jh;
1402 int drop_reserve = 0;
1403 int err = 0;
1404 int was_modified = 0;
1406 WARN_ON(!transaction);
1407 if (is_handle_aborted(handle))
1408 return -EROFS;
1409 journal = transaction->t_journal;
1411 BUFFER_TRACE(bh, "entry");
1413 jbd_lock_bh_state(bh);
1415 if (!buffer_jbd(bh))
1416 goto not_jbd;
1417 jh = bh2jh(bh);
1419 /* Critical error: attempting to delete a bitmap buffer, maybe?
1420 * Don't do any jbd operations, and return an error. */
1421 if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1422 "inconsistent data on disk")) {
1423 err = -EIO;
1424 goto not_jbd;
1427 /* keep track of whether or not this transaction modified us */
1428 was_modified = jh->b_modified;
1431 * The buffer's going from the transaction, we must drop
1432 * all references -bzzz
1434 jh->b_modified = 0;
1436 if (jh->b_transaction == transaction) {
1437 J_ASSERT_JH(jh, !jh->b_frozen_data);
1439 /* If we are forgetting a buffer which is already part
1440 * of this transaction, then we can just drop it from
1441 * the transaction immediately. */
1442 clear_buffer_dirty(bh);
1443 clear_buffer_jbddirty(bh);
1445 JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
1448 * we only want to drop a reference if this transaction
1449 * modified the buffer
1451 if (was_modified)
1452 drop_reserve = 1;
1455 * We are no longer going to journal this buffer.
1456 * However, the commit of this transaction is still
1457 * important to the buffer: the delete that we are now
1458 * processing might obsolete an old log entry, so by
1459 * committing, we can satisfy the buffer's checkpoint.
1461 * So, if we have a checkpoint on the buffer, we should
1462 * now refile the buffer on our BJ_Forget list so that
1463 * we know to remove the checkpoint after we commit.
1466 spin_lock(&journal->j_list_lock);
1467 if (jh->b_cp_transaction) {
1468 __jbd2_journal_temp_unlink_buffer(jh);
1469 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1470 } else {
1471 __jbd2_journal_unfile_buffer(jh);
1472 if (!buffer_jbd(bh)) {
1473 spin_unlock(&journal->j_list_lock);
1474 jbd_unlock_bh_state(bh);
1475 __bforget(bh);
1476 goto drop;
1479 spin_unlock(&journal->j_list_lock);
1480 } else if (jh->b_transaction) {
1481 J_ASSERT_JH(jh, (jh->b_transaction ==
1482 journal->j_committing_transaction));
1483 /* However, if the buffer is still owned by a prior
1484 * (committing) transaction, we can't drop it yet... */
1485 JBUFFER_TRACE(jh, "belongs to older transaction");
1486 /* ... but we CAN drop it from the new transaction if we
1487 * have also modified it since the original commit. */
1489 if (jh->b_next_transaction) {
1490 J_ASSERT(jh->b_next_transaction == transaction);
1491 spin_lock(&journal->j_list_lock);
1492 jh->b_next_transaction = NULL;
1493 spin_unlock(&journal->j_list_lock);
1496 * only drop a reference if this transaction modified
1497 * the buffer
1499 if (was_modified)
1500 drop_reserve = 1;
1504 not_jbd:
1505 jbd_unlock_bh_state(bh);
1506 __brelse(bh);
1507 drop:
1508 if (drop_reserve) {
1509 /* no need to reserve log space for this block -bzzz */
1510 handle->h_buffer_credits++;
1512 return err;
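/*
 * Illustrative sketch (editorial addition): a filesystem freeing a block it
 * may have journalled simply hands its buffer reference to
 * jbd2_journal_forget(), which consumes it on every path (note the
 * __brelse()/__bforget() above):
 *
 *	err = jbd2_journal_forget(handle, bh);
 *	bh = NULL;	(the reference is gone whether or not err is set)
 */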
1516 * int jbd2_journal_stop() - complete a transaction
1517 * @handle: transaction to complete.
1519 * All done for a particular handle.
1521 * There is not much action needed here. We just return any remaining
1522 * buffer credits to the transaction and remove the handle. The only
1523 * complication is that we need to start a commit operation if the
1524 * filesystem is marked for synchronous update.
1526 * jbd2_journal_stop itself will not usually return an error, but it may
1527 * do so in unusual circumstances. In particular, expect it to
1528 * return -EIO if a jbd2_journal_abort has been executed since the
1529 * transaction began.
1531 int jbd2_journal_stop(handle_t *handle)
1533 transaction_t *transaction = handle->h_transaction;
1534 journal_t *journal;
1535 int err = 0, wait_for_commit = 0;
1536 tid_t tid;
1537 pid_t pid;
1539 if (!transaction)
1540 goto free_and_exit;
1541 journal = transaction->t_journal;
1543 J_ASSERT(journal_current_handle() == handle);
1545 if (is_handle_aborted(handle))
1546 err = -EIO;
1547 else
1548 J_ASSERT(atomic_read(&transaction->t_updates) > 0);
1550 if (--handle->h_ref > 0) {
1551 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1552 handle->h_ref);
1553 return err;
1556 jbd_debug(4, "Handle %p going down\n", handle);
1557 trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
1558 transaction->t_tid,
1559 handle->h_type, handle->h_line_no,
1560 jiffies - handle->h_start_jiffies,
1561 handle->h_sync, handle->h_requested_credits,
1562 (handle->h_requested_credits -
1563 handle->h_buffer_credits));
1566 * Implement synchronous transaction batching. If the handle
1567 * was synchronous, don't force a commit immediately. Let's
1568 * yield and let another thread piggyback onto this
1569 * transaction. Keep doing that while new threads continue to
1570 * arrive. It doesn't cost much - we're about to run a commit
1571 * and sleep on IO anyway. Speeds up many-threaded, many-dir
1572 * operations by 30x or more...
1574 * We try and optimize the sleep time against what the
1575 * underlying disk can do, instead of having a static sleep
1576 * time. This is useful for the case where our storage is so
1577 * fast that it is more optimal to go ahead and force a flush
1578 * and wait for the transaction to be committed than it is to
1579 * wait for an arbitrary amount of time for new writers to
1580 * join the transaction. We achieve this by measuring how
1581 * long it takes to commit a transaction, and compare it with
1582 * how long this transaction has been running, and if run time
1583 * < commit time then we sleep for the delta and commit. This
1584 * greatly helps super fast disks that would see slowdowns as
1585 * more threads started doing fsyncs.
1587 * But don't do this if this process was the most recent one
1588 * to perform a synchronous write. We do this to detect the
1589 * case where a single process is doing a stream of sync
1590 * writes. No point in waiting for joiners in that case.
1592 pid = current->pid;
1593 if (handle->h_sync && journal->j_last_sync_writer != pid) {
1594 u64 commit_time, trans_time;
1596 journal->j_last_sync_writer = pid;
1598 read_lock(&journal->j_state_lock);
1599 commit_time = journal->j_average_commit_time;
1600 read_unlock(&journal->j_state_lock);
1602 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1603 transaction->t_start_time));
1605 commit_time = max_t(u64, commit_time,
1606 1000*journal->j_min_batch_time);
1607 commit_time = min_t(u64, commit_time,
1608 1000*journal->j_max_batch_time);
1610 if (trans_time < commit_time) {
1611 ktime_t expires = ktime_add_ns(ktime_get(),
1612 commit_time);
1613 set_current_state(TASK_UNINTERRUPTIBLE);
1614 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1618 if (handle->h_sync)
1619 transaction->t_synchronous_commit = 1;
1620 current->journal_info = NULL;
1621 atomic_sub(handle->h_buffer_credits,
1622 &transaction->t_outstanding_credits);
1625 * If the handle is marked SYNC, we need to set another commit
1626 * going! We also want to force a commit if the current
1627 * transaction is occupying too much of the log, or if the
1628 * transaction is too old now.
1630 if (handle->h_sync ||
1631 (atomic_read(&transaction->t_outstanding_credits) >
1632 journal->j_max_transaction_buffers) ||
1633 time_after_eq(jiffies, transaction->t_expires)) {
1634 /* Do this even for aborted journals: an abort still
1635 * completes the commit thread, it just doesn't write
1636 * anything to disk. */
1638 jbd_debug(2, "transaction too old, requesting commit for "
1639 "handle %p\n", handle);
1640 /* This is non-blocking */
1641 jbd2_log_start_commit(journal, transaction->t_tid);
1644 * Special case: JBD2_SYNC synchronous updates require us
1645 * to wait for the commit to complete.
1647 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1648 wait_for_commit = 1;
1652 * Once we drop t_updates, if it goes to zero the transaction
1653 * could start committing on us and eventually disappear. So
1654 * once we do this, we must not dereference transaction
1655 * pointer again.
1657 tid = transaction->t_tid;
1658 if (atomic_dec_and_test(&transaction->t_updates)) {
1659 wake_up(&journal->j_wait_updates);
1660 if (journal->j_barrier_count)
1661 wake_up(&journal->j_wait_transaction_locked);
1664 if (wait_for_commit)
1665 err = jbd2_log_wait_commit(journal, tid);
1667 lock_map_release(&handle->h_lockdep_map);
1669 if (handle->h_rsv_handle)
1670 jbd2_journal_free_reserved(handle->h_rsv_handle);
1671 free_and_exit:
1672 jbd2_free_handle(handle);
1673 return err;
1678 * List management code snippets: various functions for manipulating the
1679 * transaction buffer lists.
1684 * Append a buffer to a transaction list, given the transaction's list head
1685 * pointer.
1687 * j_list_lock is held.
1689 * jbd_lock_bh_state(jh2bh(jh)) is held.
1692 static inline void
1693 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1695 if (!*list) {
1696 jh->b_tnext = jh->b_tprev = jh;
1697 *list = jh;
1698 } else {
1699 /* Insert at the tail of the list to preserve order */
1700 struct journal_head *first = *list, *last = first->b_tprev;
1701 jh->b_tprev = last;
1702 jh->b_tnext = first;
1703 last->b_tnext = first->b_tprev = jh;
1708 * Remove a buffer from a transaction list, given the transaction's list
1709 * head pointer.
1711 * Called with j_list_lock held, and the journal may not be locked.
1713 * jbd_lock_bh_state(jh2bh(jh)) is held.
1716 static inline void
1717 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1719 if (*list == jh) {
1720 *list = jh->b_tnext;
1721 if (*list == jh)
1722 *list = NULL;
1724 jh->b_tprev->b_tnext = jh->b_tnext;
1725 jh->b_tnext->b_tprev = jh->b_tprev;
1729 * Remove a buffer from the appropriate transaction list.
1731 * Note that this function can *change* the value of
1732 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
1733 * t_reserved_list. If the caller is holding onto a copy of one of these
1734 * pointers, it could go bad. Generally the caller needs to re-read the
1735 * pointer from the transaction_t.
1737 * Called under j_list_lock.
1739 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1741 struct journal_head **list = NULL;
1742 transaction_t *transaction;
1743 struct buffer_head *bh = jh2bh(jh);
1745 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1746 transaction = jh->b_transaction;
1747 if (transaction)
1748 assert_spin_locked(&transaction->t_journal->j_list_lock);
1750 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1751 if (jh->b_jlist != BJ_None)
1752 J_ASSERT_JH(jh, transaction != NULL);
1754 switch (jh->b_jlist) {
1755 case BJ_None:
1756 return;
1757 case BJ_Metadata:
1758 transaction->t_nr_buffers--;
1759 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
1760 list = &transaction->t_buffers;
1761 break;
1762 case BJ_Forget:
1763 list = &transaction->t_forget;
1764 break;
1765 case BJ_Shadow:
1766 list = &transaction->t_shadow_list;
1767 break;
1768 case BJ_Reserved:
1769 list = &transaction->t_reserved_list;
1770 break;
1773 __blist_del_buffer(list, jh);
1774 jh->b_jlist = BJ_None;
1775 if (test_clear_buffer_jbddirty(bh))
1776 mark_buffer_dirty(bh); /* Expose it to the VM */
1780 * Remove buffer from all transactions.
1782 * Called with bh_state lock and j_list_lock held.
1784 * jh and bh may be already freed when this function returns.
1786 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1788 __jbd2_journal_temp_unlink_buffer(jh);
1789 jh->b_transaction = NULL;
1790 jbd2_journal_put_journal_head(jh);
1793 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1795 struct buffer_head *bh = jh2bh(jh);
1797 /* Get reference so that buffer cannot be freed before we unlock it */
1798 get_bh(bh);
1799 jbd_lock_bh_state(bh);
1800 spin_lock(&journal->j_list_lock);
1801 __jbd2_journal_unfile_buffer(jh);
1802 spin_unlock(&journal->j_list_lock);
1803 jbd_unlock_bh_state(bh);
1804 __brelse(bh);
/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
        struct journal_head *jh;

        jh = bh2jh(bh);

        if (buffer_locked(bh) || buffer_dirty(bh))
                goto out;

        if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
                goto out;

        spin_lock(&journal->j_list_lock);
        if (jh->b_cp_transaction != NULL) {
                /* written-back checkpointed metadata buffer */
                JBUFFER_TRACE(jh, "remove from checkpoint list");
                __jbd2_journal_remove_checkpoint(jh);
        }
        spin_unlock(&journal->j_list_lock);
out:
        return;
}
/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to
 * release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
                                struct page *page, gfp_t gfp_mask)
{
        struct buffer_head *head;
        struct buffer_head *bh;
        int ret = 0;

        J_ASSERT(PageLocked(page));

        head = page_buffers(page);
        bh = head;
        do {
                struct journal_head *jh;

                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
                 * jbd2_journal_put_journal_head().
                 */
                jh = jbd2_journal_grab_journal_head(bh);
                if (!jh)
                        continue;

                jbd_lock_bh_state(bh);
                __journal_try_to_free_buffer(journal, bh);
                jbd2_journal_put_journal_head(jh);
                jbd_unlock_bh_state(bh);
                if (buffer_jbd(bh))
                        goto busy;
        } while ((bh = bh->b_this_page) != head);

        ret = try_to_free_buffers(page);

busy:
        return ret;
}
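/*
 * Illustrative sketch (not part of jbd2 itself): roughly how a filesystem's
 * ->releasepage() hook might drive jbd2_journal_try_to_free_buffers().
 * "example_releasepage" is a made-up name, and how the journal pointer is
 * obtained is left to the filesystem; ext4 has its own, more involved
 * variant of this logic.
 */
static int example_releasepage(journal_t *journal, struct page *page,
                               gfp_t gfp_mask)
{
        /* Pages flagged as checked carry journalled data; keep them. */
        if (PageChecked(page))
                return 0;
        if (journal)
                return jbd2_journal_try_to_free_buffers(journal, page,
                                                        gfp_mask);
        /* No journal attached: fall back to the generic helper. */
        return try_to_free_buffers(page);
}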
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);

        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
                __jbd2_journal_temp_unlink_buffer(jh);
                /*
                 * We don't want to write the buffer anymore, clear the
                 * bit so that we don't confuse checks in
                 * __journal_file_buffer
                 */
                clear_buffer_dirty(bh);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
                __jbd2_journal_unfile_buffer(jh);
        }
        return may_free;
}
/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on
 * the data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
                                int partial_page)
{
        transaction_t *transaction;
        struct journal_head *jh;
        int may_free = 1;

        BUFFER_TRACE(bh, "entry");

        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
         * holding the page lock. --sct
         */

        if (!buffer_jbd(bh))
                goto zap_buffer_unlocked;

        /* OK, we have data buffer in journaled mode */
        write_lock(&journal->j_state_lock);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        jh = jbd2_journal_grab_journal_head(bh);
        if (!jh)
                goto zap_buffer_no_jh;

        /*
         * We cannot remove the buffer from checkpoint lists until the
         * transaction adding inode to orphan list (let's call it T)
         * is committed.  Otherwise if the transaction changing the
         * buffer would be cleaned from the journal before T is
         * committed, a crash would cause the correct contents of
         * the buffer to be lost.  On the other hand we have to
         * clear the buffer dirty bit at latest at the moment when the
         * transaction marking the buffer as freed in the filesystem
         * structures is committed because from that moment on the
         * block can be reallocated and used by a different page.
         * Since the block hasn't been freed yet but the inode has
         * already been added to orphan list, it is safe for us to add
         * the buffer to BJ_Forget list of the newest transaction.
         *
         * Also we have to clear buffer_mapped flag of a truncated buffer
         * because the buffer_head may be attached to the page straddling
         * i_size (can happen only when blocksize < pagesize) and thus the
         * buffer_head can be reused when the file is extended again. So we end
         * up keeping around invalidated buffers attached to transactions'
         * BJ_Forget list just to stop checkpointing code from cleaning up
         * the transaction this buffer was modified in.
         */
        transaction = jh->b_transaction;
        if (transaction == NULL) {
                /* First case: not on any transaction.  If it
                 * has no checkpoint link, then we can zap it:
                 * it's a writeback-mode buffer so we don't care
                 * if it hits disk safely. */
                if (!jh->b_cp_transaction) {
                        JBUFFER_TRACE(jh, "not on any transaction: zap");
                        goto zap_buffer;
                }

                if (!buffer_dirty(bh)) {
                        /* bdflush has written it.  We can drop it now */
                        goto zap_buffer;
                }

                /* OK, it must be in the journal but still not
                 * written fully to disk: it's metadata or
                 * journaled data... */

                if (journal->j_running_transaction) {
                        /* ... and once the current transaction has
                         * committed, the buffer won't be needed any
                         * longer. */
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
                        may_free = __dispose_buffer(jh,
                                        journal->j_running_transaction);
                        goto zap_buffer;
                } else {
                        /* There is no currently-running transaction. So the
                         * orphan record which we wrote for this file must have
                         * passed into commit.  We must attach this buffer to
                         * the committing transaction, if it exists. */
                        if (journal->j_committing_transaction) {
                                JBUFFER_TRACE(jh, "give to committing trans");
                                may_free = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
                                goto zap_buffer;
                        } else {
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
                                clear_buffer_jbddirty(bh);
                                goto zap_buffer;
                        }
                }
        } else if (transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "on committing transaction");
                /*
                 * The buffer is committing, we simply cannot touch
                 * it. If the page is straddling i_size we have to wait
                 * for commit and try again.
                 */
                if (partial_page) {
                        jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        write_unlock(&journal->j_state_lock);
                        return -EBUSY;
                }
                /*
                 * OK, buffer won't be reachable after truncate. We just set
                 * b_next_transaction to the running transaction (if there is
                 * one) and mark buffer as freed so that commit code knows it
                 * should clear dirty bits when it is done with the buffer.
                 */
                set_buffer_freed(bh);
                if (journal->j_running_transaction && buffer_jbddirty(bh))
                        jh->b_next_transaction = journal->j_running_transaction;
                jbd2_journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                write_unlock(&journal->j_state_lock);
                return 0;
        } else {
                /* Good, the buffer belongs to the running transaction.
                 * We are writing our own transaction's data, not any
                 * previous one's, so it is safe to throw it away
                 * (remember that we expect the filesystem to have set
                 * i_size already for this truncate so recovery will not
                 * expose the disk blocks we are discarding here.) */
                J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
                JBUFFER_TRACE(jh, "on running transaction");
                may_free = __dispose_buffer(jh, transaction);
        }
zap_buffer:
        /*
         * This is tricky. Although the buffer is truncated, it may be reused
         * if blocksize < pagesize and it is attached to the page straddling
         * EOF. Since the buffer might have been added to BJ_Forget list of the
         * running transaction, journal_get_write_access() won't clear
         * b_modified and credit accounting gets confused. So clear b_modified
         * here.
         */
        jh->b_modified = 0;
        jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
        clear_buffer_dirty(bh);
        J_ASSERT_BH(bh, !buffer_jbddirty(bh));
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        bh->b_bdev = NULL;
        return may_free;
}
/**
 * int jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 * @length:  length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. Caller then has to wait for current commit
 * and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
                                struct page *page,
                                unsigned int offset,
                                unsigned int length)
{
        struct buffer_head *head, *bh, *next;
        unsigned int stop = offset + length;
        unsigned int curr_off = 0;
        int partial_page = (offset || length < PAGE_CACHE_SIZE);
        int may_free = 1;
        int ret = 0;

        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
                return 0;

        BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);

        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */

        head = bh = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (next_off > stop)
                        return 0;

                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
                        ret = journal_unmap_buffer(journal, bh, partial_page);
                        unlock_buffer(bh);
                        if (ret < 0)
                                return ret;
                        may_free &= ret;
                }
                curr_off = next_off;
                bh = next;

        } while (bh != head);

        if (!partial_page) {
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
        return 0;
}
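/*
 * Illustrative sketch (not part of jbd2 itself): one way a caller could
 * handle the -EBUSY return above, by waiting for the committing transaction
 * and retrying.  "example_invalidatepage_retry" is a made-up name; ext4 uses
 * a similar loop when invalidating a page that straddles i_size, and
 * additionally re-finds the page after dropping its lock, which this
 * simplified sketch does not show.
 */
static int example_invalidatepage_retry(journal_t *journal, struct page *page,
                                        unsigned int offset,
                                        unsigned int length)
{
        tid_t tid;
        int ret;

        for (;;) {
                lock_page(page);
                ret = jbd2_journal_invalidatepage(journal, page, offset,
                                                  length);
                unlock_page(page);
                if (ret != -EBUSY)
                        return ret;

                /*
                 * Buffers belong to the committing transaction; drop the
                 * page lock before waiting, since commit may need it for
                 * ordered data writeout.
                 */
                read_lock(&journal->j_state_lock);
                tid = journal->j_committing_transaction ?
                        journal->j_committing_transaction->t_tid : 0;
                read_unlock(&journal->j_state_lock);
                if (tid)
                        jbd2_log_wait_commit(journal, tid);
        }
}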
/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        struct journal_head **list = NULL;
        int was_dirty = 0;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_transaction == NULL);

        if (jh->b_transaction && jh->b_jlist == jlist)
                return;

        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
                /*
                 * For metadata buffers, we track dirty bit in buffer_jbddirty
                 * instead of buffer_dirty. We should not see a dirty bit set
                 * here because we clear it in do_get_write_access but e.g.
                 * tune2fs can modify the sb and set the dirty bit at any time
                 * so we try to gracefully handle that.
                 */
                if (buffer_dirty(bh))
                        warn_dirty_buffer(bh);
                if (test_clear_buffer_dirty(bh) ||
                    test_clear_buffer_jbddirty(bh))
                        was_dirty = 1;
        }

        if (jh->b_transaction)
                __jbd2_journal_temp_unlink_buffer(jh);
        else
                jbd2_journal_grab_journal_head(bh);
        jh->b_transaction = transaction;

        switch (jlist) {
        case BJ_None:
                J_ASSERT_JH(jh, !jh->b_committed_data);
                J_ASSERT_JH(jh, !jh->b_frozen_data);
                return;
        case BJ_Metadata:
                transaction->t_nr_buffers++;
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        }

        __blist_add_buffer(list, jh);
        jh->b_jlist = jlist;

        if (was_dirty)
                set_buffer_jbddirty(bh);
}
void jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&transaction->t_journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}
/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely. If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jbd_lock_bh_state(jh2bh(jh))
 *
 * jh and bh may be already free when this function returns
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
        int was_dirty, jlist;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        if (jh->b_transaction)
                assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

        /* If the buffer is now unused, just drop it. */
        if (jh->b_next_transaction == NULL) {
                __jbd2_journal_unfile_buffer(jh);
                return;
        }

        /*
         * It has been modified by a later transaction: add it to the new
         * transaction's metadata list.
         */

        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
        /*
         * We set b_transaction here because b_next_transaction will inherit
         * our jh reference and thus __jbd2_journal_file_buffer() must not
         * take a new one.
         */
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        if (buffer_freed(bh))
                jlist = BJ_Forget;
        else if (jh->b_modified)
                jlist = BJ_Metadata;
        else
                jlist = BJ_Reserved;
        __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

        if (was_dirty)
                set_buffer_jbddirty(bh);
}
/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        /* Get reference so that buffer cannot be freed before we unlock it */
        get_bh(bh);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
}
/*
 * File inode in the inode list of the handle's transaction
 */
int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;

        WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;

        jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
                        transaction->t_tid);

        /*
         * First check whether inode isn't already on the transaction's
         * lists without taking the lock. Note that this check is safe
         * without the lock as we cannot race with somebody removing inode
         * from the transaction. The reason is that we remove inode from the
         * transaction only in journal_release_jbd_inode() and when we commit
         * the transaction. We are guarded from the first case by holding
         * a reference to the inode. We are safe against the second case
         * because if jinode->i_transaction == transaction, commit code
         * cannot touch the transaction because we hold reference to it,
         * and if jinode->i_next_transaction == transaction, commit code
         * will only file the inode where we want it.
         */
        if (jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction)
                return 0;

        spin_lock(&journal->j_list_lock);

        if (jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction)
                goto done;

        /*
         * We only ever set this variable to 1 so the test is safe. Since
         * t_need_data_flush is likely to be set, we do the test to save some
         * cacheline bouncing
         */
        if (!transaction->t_need_data_flush)
                transaction->t_need_data_flush = 1;
        /* On some different transaction's list - should be
         * the committing one */
        if (jinode->i_transaction) {
                J_ASSERT(jinode->i_next_transaction == NULL);
                J_ASSERT(jinode->i_transaction ==
                                        journal->j_committing_transaction);
                jinode->i_next_transaction = transaction;
                goto done;
        }
        /* Not on any transaction list... */
        J_ASSERT(!jinode->i_next_transaction);
        jinode->i_transaction = transaction;
        list_add(&jinode->i_list, &transaction->t_inode_list);
done:
        spin_unlock(&journal->j_list_lock);

        return 0;
}
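/*
 * Illustrative sketch (not part of jbd2 itself): roughly where an
 * ordered-mode write path would call jbd2_journal_file_inode() - after
 * blocks have been mapped and dirtied under an open handle, so the commit
 * code knows to flush the data before the metadata.
 * "example_order_data_after_write" is a made-up name and the two flags
 * stand in for checks the filesystem would make itself.
 */
static int example_order_data_after_write(handle_t *handle,
                                          struct jbd2_inode *jinode,
                                          bool inode_is_ordered,
                                          bool new_blocks_mapped)
{
        /* Only ordered-mode data backed by newly mapped blocks needs to be
         * tied to the running transaction. */
        if (!inode_is_ordered || !new_blocks_mapped)
                return 0;
        return jbd2_journal_file_inode(handle, jinode);
}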
/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on orphan list with the new inode size. The second condition
 * avoids the race that someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to orphan list
 * happens in the same transaction as write --- we don't have to write
 * any data in such case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
                                        struct jbd2_inode *jinode,
                                        loff_t new_size)
{
        transaction_t *inode_trans, *commit_trans;
        int ret = 0;

        /* This is a quick check to avoid locking if not necessary */
        if (!jinode->i_transaction)
                goto out;
        /* Locks are here just to force reading of recent values, it is
         * enough that the transaction was not committing before we started
         * a transaction adding the inode to orphan list */
        read_lock(&journal->j_state_lock);
        commit_trans = journal->j_committing_transaction;
        read_unlock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        inode_trans = jinode->i_transaction;
        spin_unlock(&journal->j_list_lock);
        if (inode_trans == commit_trans) {
                ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
                        new_size, LLONG_MAX);
                if (ret)
                        jbd2_journal_abort(journal, ret);
        }
out:
        return ret;
}
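/*
 * Illustrative sketch (not part of jbd2 itself): the calling convention
 * described in the comment above, for a filesystem shrinking a file in
 * ordered mode.  "example_shrink_inode" is a made-up name; the orphan-list
 * insertion and the actual block truncation are only indicated by comments
 * because they are filesystem specific.
 */
static int example_shrink_inode(journal_t *journal, struct jbd2_inode *jinode,
                                loff_t new_size)
{
        int err;

        /*
         * Step 1 (filesystem specific, not shown): add the inode to the
         * orphan list with the new size inside the running handle.
         *
         * Step 2: make sure data beyond new_size that belongs to the
         * committing transaction reaches the disk before it is discarded.
         */
        err = jbd2_journal_begin_ordered_truncate(journal, jinode, new_size);
        if (err)
                return err;

        /*
         * Step 3 (filesystem specific, not shown): truncate the blocks
         * beyond new_size and update i_size on disk.
         */
        return 0;
}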