/*
 * Write ahead logging implementation copyright Chris Mason 2000
 *
 * The background commits make this code very interrelated, and
 * overly complex.  I need to rethink things a bit....The major players:
 *
 * journal_begin -- call with the number of blocks you expect to log.
 *                  If the current transaction is too
 *                  old, it will block until the current transaction is
 *                  finished, and then start a new one.
 *                  Usually, your transaction will get joined in with
 *                  previous ones for speed.
 *
 * journal_join  -- same as journal_begin, but won't block on the current
 *                  transaction regardless of age.  Don't ever call
 *                  this.  Ever.  There are only two places it should be
 *                  called from, and they are both inside this file.
 *
 * journal_mark_dirty -- adds blocks into this transaction.  clears any flags
 *                       that might make them get sent to disk
 *                       and then marks them BH_JDirty.  Puts the buffer head
 *                       into the current transaction hash.
 *
 * journal_end -- if the current transaction is batchable, it does nothing
 *                otherwise, it could do an async/synchronous commit, or
 *                a full flush of all log and real blocks in the
 *                transaction.
 *
 * flush_old_commits -- if the current transaction is too old, it is ended and
 *                      commit blocks are sent to disk.  Forces commit blocks
 *                      to disk for all backgrounded commits that have been
 *                      around too long.
 *                   -- Note, if you call this as an immediate flush from
 *                      within kupdate, it will ignore the immediate flag
 */
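/*
 * A minimal usage sketch of the API described above, modeled on the call
 * sequence do_journal_release() uses later in this file.  The one-argument
 * journal_end() and two-argument journal_mark_dirty() signatures are
 * assumed from those call sites; error handling is omitted.  Kept under
 * #if 0 so it is never built.
 */
#if 0
static int example_log_super_block(struct super_block *sb)
{
	struct reiserfs_transaction_handle th;
	int err;

	/* reserve room to log one block; may join an existing transaction */
	err = journal_begin(&th, sb, 1);
	if (err)
		return err;

	/* prepare and log the superblock buffer */
	reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
	journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));

	/* end the handle; the commit may happen asynchronously later */
	return journal_end(&th);
}
#endif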
#include <linux/time.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
				j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
				j_working_list))

/* must be correct to keep the desc and commit structs at 4k */
#define JOURNAL_TRANS_HALF 1018
#define BUFNR 64		/* read ahead */
/* cnode stat bits.  Move these into reiserfs_fs.h */

/* this block was freed, and can't be written. */
#define BLOCK_FREED 2
/* this block was freed during this transaction, and can't be written */
#define BLOCK_FREED_HOLDER 3

/* used in flush_journal_list */
#define BLOCK_NEEDS_FLUSH 4
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */
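/*
 * In this file the unmount path (do_journal_release below) asks for a full
 * synchronous flush by passing FLUSH_ALL on its own; COMMIT_NOW and WAIT
 * appear to cover the weaker "commit this transaction now but don't flush
 * the real blocks" cases handled inside do_journal_end.
 */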
static int do_journal_end(struct reiserfs_transaction_handle *, int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb);
static void release_journal_dev(struct super_block *super,
				struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	/* join the running transaction if at all possible */
	JBEGIN_JOIN = 1,
	/* called from cleanup code, ignores aborted flag */
	JBEGIN_ABORT = 2,
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb,
			      unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}
/*
 * clears BH_Dirty and sticks the buffer on the clean list.  Called because
 * I can't allow refile_buffer to make schedule happen after I've freed a
 * block.  Look at remove_from_transaction and journal_mark_freed for
 * more details.
 */
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	bn->id = id++;
	INIT_LIST_HEAD(&bn->list);
	return bn;
}
static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}
static inline void free_bitmap_node(struct super_block *sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}
static void allocate_bitmap_nodes(struct super_block *sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			/* this is ok, we'll try again when more are needed */
			break;
		}
	}
}
static int set_bit_in_list_bitmap(struct super_block *sb,
				  b_blocknr_t block,
				  struct reiserfs_list_bitmap *jb)
{
	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
	unsigned int bit_nr = block % (sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}
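/*
 * Worked example of the mapping above (illustrative only): with a 4096 byte
 * block size there are 4096 << 3 == 32768 bits per bitmap node, so block
 * number 100000 lands in bitmap node 100000 / 32768 == 3 at bit
 * 100000 % 32768 == 1696.  reiserfs_in_journal() below reverses this with
 * bl = bmap_nr * (s_blocksize << 3) + bit_nr.
 */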
static void cleanup_bitmap_list(struct super_block *sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < reiserfs_bmap_count(sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}
/*
 * only call this on FS unmount.
 */
static int free_list_bitmaps(struct super_block *sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}
static int free_bitmap_nodes(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}
/*
 * get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
 * jb_array is the array to be filled in.
 */
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
				   struct reiserfs_list_bitmap *jb_array,
				   unsigned int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vzalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(sb, "clm-2000", "unable to "
					 "allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
	}
	if (failed) {
		free_list_bitmaps(sb, jb_array);
		return -1;
	}
	return 0;
}
/*
 * find an available list bitmap.  If you can't find one, flush a commit list
 * and try again
 */
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	/* double check to make sure if flushed correctly */
	if (jb->journal_list)
		return NULL;
	jb->journal_list = jl;
	return jb;
}
/*
 * allocates a new chunk of X nodes, and links them all together as a list.
 * Uses the cnode->next and cnode->prev pointers
 * returns NULL on failure
 */
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vzalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}
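/*
 * The array above is threaded into a doubly linked free list: head[0] is the
 * first free cnode and head[num_cnodes - 1].next is terminated with NULL, so
 * get_cnode()/free_cnode() below can pop and push nodes in O(1) without any
 * further allocation.
 */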
/* pulls a cnode off the free list, or returns NULL on failure */
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}
/*
 * returns a cnode to the free list
 */
static void free_cnode(struct super_block *sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}
static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}
/*
 * return a cnode with same dev, block number and size in table,
 * or null if not found
 */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}
/*
 * this actually means 'can this block be reallocated yet?'.  If you set
 * search_all, a block can only be allocated if it is not in the current
 * transaction, was not freed by the current transaction, and has no chance
 * of ever being overwritten by a replay after crashing.
 *
 * If you don't set search_all, a block can only be allocated if it is not
 * in the current transaction.  Since deleting a block removes it from the
 * current transaction, this case should never happen.  If you don't set
 * search_all, make sure you never write the block without logging it.
 *
 * next_zero_bit is a suggestion about the next block to try for find_forward.
 * when bl is rejected because it is set in a journal list bitmap, we search
 * for the next zero bit in the bitmap that rejected bl.  Then, we return
 * that through next_zero_bit for find_forward to try.
 *
 * Just because we return something in next_zero_bit does not mean we won't
 * reject it on the next call to reiserfs_in_journal
 */
int reiserfs_in_journal(struct super_block *sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(sb, journal.in_journal);
	/*
	 * If we aren't doing a search_all, this is a metablock, and it
	 * will be logged before use.  if we crash before the transaction
	 * that freed it commits, this transaction won't have committed
	 * either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}
/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}
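/*
 * journal_hash() is a macro (from the reiserfs headers) that expands to an
 * lvalue slot in the hash table, which is why it can appear on the left-hand
 * side of the assignment above; insertion is always at the head of the
 * per-bucket hnext/hprev chain.
 */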
/* lock the current transaction */
static inline void lock_journal(struct super_block *sb)
{
	PROC_INFO_INC(sb, journal.lock_journal);

	reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *sb)
{
	mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
}
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}
/*
 * this used to be much more involved, and I'm keeping it just in case
 * things get ugly again.  it gets called by flush_commit_list, and
 * cleans up any data stored about blocks freed during a transaction.
 */
static void cleanup_freed_for_journal_list(struct super_block *sb,
					   struct reiserfs_journal_list *jl)
{
	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}
static int journal_list_still_alive(struct super_block *s,
				    unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}
/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && trylock_page(page)) {
		page_cache_get(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	} else {
		put_bh(bh);
	}
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL, "clm-2084",
				 "pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	release_buffer_page(bh);
}
static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}
static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}
#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock) {
			spin_unlock(lock);
			fn(chunk);
			spin_lock(lock);
		} else {
			fn(chunk);
		}
	}
	return ret;
}
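/*
 * Design note: when the chunk fills up, the supplied lock (if any) is
 * dropped around the flush callback, since submitting CHUNK_SIZE buffers
 * may block; callers therefore must not rely on the lock being held
 * continuously across add_to_chunk().
 */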
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/*
		 * buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
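/*
 * The tail flag above selects which per-transaction list the buffer head
 * lands on: j_tail_bh_list for tail-packing conversions, j_bh_list for
 * ordinary data=ordered buffers.  flush_commit_list() later writes the
 * j_bh_list entries out through write_ordered_buffers().
 */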
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
794 static int write_ordered_buffers(spinlock_t
* lock
,
795 struct reiserfs_journal
*j
,
796 struct reiserfs_journal_list
*jl
,
797 struct list_head
*list
)
799 struct buffer_head
*bh
;
800 struct reiserfs_jh
*jh
;
801 int ret
= j
->j_errno
;
802 struct buffer_chunk chunk
;
803 struct list_head tmp
;
804 INIT_LIST_HEAD(&tmp
);
808 while (!list_empty(list
)) {
809 jh
= JH_ENTRY(list
->next
);
812 if (!trylock_buffer(bh
)) {
813 if (!buffer_dirty(bh
)) {
814 list_move(&jh
->list
, &tmp
);
819 write_ordered_chunk(&chunk
);
826 * in theory, dirty non-uptodate buffers should never get here,
827 * but the upper layer io error paths still have a few quirks.
828 * Handle them here as gracefully as we can
830 if (!buffer_uptodate(bh
) && buffer_dirty(bh
)) {
831 clear_buffer_dirty(bh
);
834 if (buffer_dirty(bh
)) {
835 list_move(&jh
->list
, &tmp
);
836 add_to_chunk(&chunk
, bh
, lock
, write_ordered_chunk
);
838 reiserfs_free_jh(bh
);
843 cond_resched_lock(lock
);
847 write_ordered_chunk(&chunk
);
850 while (!list_empty(&tmp
)) {
851 jh
= JH_ENTRY(tmp
.prev
);
854 reiserfs_free_jh(bh
);
856 if (buffer_locked(bh
)) {
861 if (!buffer_uptodate(bh
)) {
865 * ugly interaction with invalidatepage here.
866 * reiserfs_invalidate_page will pin any buffer that has a
867 * valid journal head from an older transaction. If someone
868 * else sets our buffer dirty after we write it in the first
869 * loop, and then someone truncates the page away, nobody
870 * will ever write the buffer. We're safe if we write the
871 * page one last time after freeing the journal header.
873 if (buffer_dirty(bh
) && unlikely(bh
->b_page
->mapping
== NULL
)) {
875 ll_rw_block(WRITE
, 1, &bh
);
879 cond_resched_lock(lock
);
885 static int flush_older_commits(struct super_block
*s
,
886 struct reiserfs_journal_list
*jl
)
888 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
889 struct reiserfs_journal_list
*other_jl
;
890 struct reiserfs_journal_list
*first_jl
;
891 struct list_head
*entry
;
892 unsigned int trans_id
= jl
->j_trans_id
;
893 unsigned int other_trans_id
;
894 unsigned int first_trans_id
;
	 * first we walk backwards to find the oldest uncommitted transaction
901 entry
= jl
->j_list
.prev
;
903 other_jl
= JOURNAL_LIST_ENTRY(entry
);
904 if (entry
== &journal
->j_journal_list
||
905 atomic_read(&other_jl
->j_older_commits_done
))
909 entry
= other_jl
->j_list
.prev
;
912 /* if we didn't find any older uncommitted transactions, return now */
913 if (first_jl
== jl
) {
917 first_trans_id
= first_jl
->j_trans_id
;
919 entry
= &first_jl
->j_list
;
921 other_jl
= JOURNAL_LIST_ENTRY(entry
);
922 other_trans_id
= other_jl
->j_trans_id
;
924 if (other_trans_id
< trans_id
) {
925 if (atomic_read(&other_jl
->j_commit_left
) != 0) {
926 flush_commit_list(s
, other_jl
, 0);
928 /* list we were called with is gone, return */
929 if (!journal_list_still_alive(s
, trans_id
))
933 * the one we just flushed is gone, this means
934 * all older lists are also gone, so first_jl
935 * is no longer valid either. Go back to the
938 if (!journal_list_still_alive
939 (s
, other_trans_id
)) {
944 if (entry
== &journal
->j_journal_list
)
static int reiserfs_async_progress_wait(struct super_block *s)
{
	struct reiserfs_journal *j = SB_JOURNAL(s);

	if (atomic_read(&j->j_async_throttle)) {
		int depth;

		depth = reiserfs_write_unlock_nested(s);
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
		reiserfs_write_lock_nested(s, depth);
	}

	return 0;
}
/*
 * if this journal list still has commit blocks unflushed, send them to disk.
 *
 * log areas must be flushed in order (transaction 2 can't commit before
 * transaction 1) Before the commit block can be written, every other log
 * block must be safely on disk
 */
975 static int flush_commit_list(struct super_block
*s
,
976 struct reiserfs_journal_list
*jl
, int flushall
)
980 struct buffer_head
*tbh
= NULL
;
981 unsigned int trans_id
= jl
->j_trans_id
;
982 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
987 reiserfs_check_lock_depth(s
, "flush_commit_list");
989 if (atomic_read(&jl
->j_older_commits_done
)) {
994 * before we can put our commit blocks on disk, we have to make
995 * sure everyone older than us is on disk too
997 BUG_ON(jl
->j_len
<= 0);
998 BUG_ON(trans_id
== journal
->j_trans_id
);
1000 get_journal_list(jl
);
1002 if (flush_older_commits(s
, jl
) == 1) {
1004 * list disappeared during flush_older_commits.
1011 /* make sure nobody is trying to flush this one at the same time */
1012 reiserfs_mutex_lock_safe(&jl
->j_commit_mutex
, s
);
1014 if (!journal_list_still_alive(s
, trans_id
)) {
1015 mutex_unlock(&jl
->j_commit_mutex
);
1018 BUG_ON(jl
->j_trans_id
== 0);
1020 /* this commit is done, exit */
1021 if (atomic_read(&jl
->j_commit_left
) <= 0) {
1023 atomic_set(&jl
->j_older_commits_done
, 1);
1025 mutex_unlock(&jl
->j_commit_mutex
);
1029 if (!list_empty(&jl
->j_bh_list
)) {
1033 * We might sleep in numerous places inside
1034 * write_ordered_buffers. Relax the write lock.
1036 depth
= reiserfs_write_unlock_nested(s
);
1037 ret
= write_ordered_buffers(&journal
->j_dirty_buffers_lock
,
1038 journal
, jl
, &jl
->j_bh_list
);
1039 if (ret
< 0 && retval
== 0)
1041 reiserfs_write_lock_nested(s
, depth
);
1043 BUG_ON(!list_empty(&jl
->j_bh_list
));
1045 * for the description block and all the log blocks, submit any buffers
1046 * that haven't already reached the disk. Try to write at least 256
1047 * log blocks. later on, we will only wait on blocks that correspond
1048 * to this transaction, but while we're unplugging we might as well
1049 * get a chunk of data on there.
1051 atomic_inc(&journal
->j_async_throttle
);
1052 write_len
= jl
->j_len
+ 1;
1053 if (write_len
< 256)
1055 for (i
= 0 ; i
< write_len
; i
++) {
1056 bn
= SB_ONDISK_JOURNAL_1st_BLOCK(s
) + (jl
->j_start
+ i
) %
1057 SB_ONDISK_JOURNAL_SIZE(s
);
1058 tbh
= journal_find_get_block(s
, bn
);
1060 if (buffer_dirty(tbh
)) {
1061 depth
= reiserfs_write_unlock_nested(s
);
1062 ll_rw_block(WRITE
, 1, &tbh
);
1063 reiserfs_write_lock_nested(s
, depth
);
1068 atomic_dec(&journal
->j_async_throttle
);
1070 for (i
= 0; i
< (jl
->j_len
+ 1); i
++) {
1071 bn
= SB_ONDISK_JOURNAL_1st_BLOCK(s
) +
1072 (jl
->j_start
+ i
) % SB_ONDISK_JOURNAL_SIZE(s
);
1073 tbh
= journal_find_get_block(s
, bn
);
1075 depth
= reiserfs_write_unlock_nested(s
);
1076 __wait_on_buffer(tbh
);
1077 reiserfs_write_lock_nested(s
, depth
);
1079 * since we're using ll_rw_blk above, it might have skipped
1080 * over a locked buffer. Double check here
1082 /* redundant, sync_dirty_buffer() checks */
1083 if (buffer_dirty(tbh
)) {
1084 depth
= reiserfs_write_unlock_nested(s
);
1085 sync_dirty_buffer(tbh
);
1086 reiserfs_write_lock_nested(s
, depth
);
1088 if (unlikely(!buffer_uptodate(tbh
))) {
1089 #ifdef CONFIG_REISERFS_CHECK
1090 reiserfs_warning(s
, "journal-601",
1091 "buffer write failed");
1095 /* once for journal_find_get_block */
1097 /* once due to original getblk in do_journal_end */
1099 atomic_dec(&jl
->j_commit_left
);
1102 BUG_ON(atomic_read(&jl
->j_commit_left
) != 1);
1105 * If there was a write error in the journal - we can't commit
1106 * this transaction - it will be invalid and, if successful,
1107 * will just end up propagating the write error out to
1110 if (likely(!retval
&& !reiserfs_is_journal_aborted (journal
))) {
1111 if (buffer_dirty(jl
->j_commit_bh
))
1113 mark_buffer_dirty(jl
->j_commit_bh
) ;
1114 depth
= reiserfs_write_unlock_nested(s
);
1115 if (reiserfs_barrier_flush(s
))
1116 __sync_dirty_buffer(jl
->j_commit_bh
, WRITE_FLUSH_FUA
);
1118 sync_dirty_buffer(jl
->j_commit_bh
);
1119 reiserfs_write_lock_nested(s
, depth
);
1123 * If there was a write error in the journal - we can't commit this
1124 * transaction - it will be invalid and, if successful, will just end
1125 * up propagating the write error out to the filesystem.
1127 if (unlikely(!buffer_uptodate(jl
->j_commit_bh
))) {
1128 #ifdef CONFIG_REISERFS_CHECK
1129 reiserfs_warning(s
, "journal-615", "buffer write failed");
1133 bforget(jl
->j_commit_bh
);
1134 if (journal
->j_last_commit_id
!= 0 &&
1135 (jl
->j_trans_id
- journal
->j_last_commit_id
) != 1) {
1136 reiserfs_warning(s
, "clm-2200", "last commit %lu, current %lu",
1137 journal
->j_last_commit_id
, jl
->j_trans_id
);
1139 journal
->j_last_commit_id
= jl
->j_trans_id
;
1142 * now, every commit block is on the disk. It is safe to allow
1143 * blocks freed during this transaction to be reallocated
1145 cleanup_freed_for_journal_list(s
, jl
);
1147 retval
= retval
? retval
: journal
->j_errno
;
1149 /* mark the metadata dirty */
1151 dirty_one_transaction(s
, jl
);
1152 atomic_dec(&jl
->j_commit_left
);
1155 atomic_set(&jl
->j_older_commits_done
, 1);
1157 mutex_unlock(&jl
->j_commit_mutex
);
1159 put_journal_list(s
, jl
);
1162 reiserfs_abort(s
, retval
, "Journal write error in %s",
/*
 * flush_journal_list frequently needs to find a newer transaction for a
 * given block.  This does that, or returns NULL if it can't find anything
 */
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}
static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);
/*
 * once all the real blocks have been flushed, it is safe to remove them
 * from the journal list for this transaction.  Aside from freeing the
 * cnode, this also allows the block to be reallocated for data blocks
 * if it had been deleted.
 */
1199 static void remove_all_from_journal_list(struct super_block
*sb
,
1200 struct reiserfs_journal_list
*jl
,
1203 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
1204 struct reiserfs_journal_cnode
*cn
, *last
;
1205 cn
= jl
->j_realblock
;
1208 * which is better, to lock once around the whole loop, or
1209 * to lock for each call to remove_journal_hash?
1212 if (cn
->blocknr
!= 0) {
1214 reiserfs_warning(sb
, "reiserfs-2201",
1215 "block %u, bh is %d, state %ld",
1216 cn
->blocknr
, cn
->bh
? 1 : 0,
1220 remove_journal_hash(sb
, journal
->j_list_hash_table
,
1221 jl
, cn
->blocknr
, 1);
1225 free_cnode(sb
, last
);
1227 jl
->j_realblock
= NULL
;
/*
 * if this timestamp is greater than the timestamp we wrote last to the
 * header block, write it to the header block.  once this is done, I can
 * safely say the log area for this transaction won't ever be replayed,
 * and I can start releasing blocks in this transaction for reuse as data
 * blocks.  called by flush_journal_list, before it calls
 * remove_all_from_journal_list
 */
1238 static int _update_journal_header_block(struct super_block
*sb
,
1239 unsigned long offset
,
1240 unsigned int trans_id
)
1242 struct reiserfs_journal_header
*jh
;
1243 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
1246 if (reiserfs_is_journal_aborted(journal
))
1249 if (trans_id
>= journal
->j_last_flush_trans_id
) {
1250 if (buffer_locked((journal
->j_header_bh
))) {
1251 depth
= reiserfs_write_unlock_nested(sb
);
1252 __wait_on_buffer(journal
->j_header_bh
);
1253 reiserfs_write_lock_nested(sb
, depth
);
1254 if (unlikely(!buffer_uptodate(journal
->j_header_bh
))) {
1255 #ifdef CONFIG_REISERFS_CHECK
1256 reiserfs_warning(sb
, "journal-699",
1257 "buffer write failed");
1262 journal
->j_last_flush_trans_id
= trans_id
;
1263 journal
->j_first_unflushed_offset
= offset
;
1264 jh
= (struct reiserfs_journal_header
*)(journal
->j_header_bh
->
1266 jh
->j_last_flush_trans_id
= cpu_to_le32(trans_id
);
1267 jh
->j_first_unflushed_offset
= cpu_to_le32(offset
);
1268 jh
->j_mount_id
= cpu_to_le32(journal
->j_mount_id
);
1270 set_buffer_dirty(journal
->j_header_bh
);
1271 depth
= reiserfs_write_unlock_nested(sb
);
1273 if (reiserfs_barrier_flush(sb
))
1274 __sync_dirty_buffer(journal
->j_header_bh
, WRITE_FLUSH_FUA
);
1276 sync_dirty_buffer(journal
->j_header_bh
);
1278 reiserfs_write_lock_nested(sb
, depth
);
1279 if (!buffer_uptodate(journal
->j_header_bh
)) {
1280 reiserfs_warning(sb
, "journal-837",
1281 "IO error during journal replay");
static int update_journal_header_block(struct super_block *sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(sb, offset, trans_id);
}
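/*
 * For reference, the three header fields updated above are all stored as
 * little-endian 32 bit values (see the cpu_to_le32() conversions in
 * _update_journal_header_block()); journal_read() below reverses them with
 * le32_to_cpu() when deciding where replay should start.
 */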
/*
 * flush any and all journal lists older than you are
 * can only be called from flush_journal_list
 */
1299 static int flush_older_journal_lists(struct super_block
*sb
,
1300 struct reiserfs_journal_list
*jl
)
1302 struct list_head
*entry
;
1303 struct reiserfs_journal_list
*other_jl
;
1304 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
1305 unsigned int trans_id
= jl
->j_trans_id
;
1308 * we know we are the only ones flushing things, no extra race
1309 * protection is required.
1312 entry
= journal
->j_journal_list
.next
;
1314 if (entry
== &journal
->j_journal_list
)
1316 other_jl
= JOURNAL_LIST_ENTRY(entry
);
1317 if (other_jl
->j_trans_id
< trans_id
) {
1318 BUG_ON(other_jl
->j_refcount
<= 0);
1319 /* do not flush all */
1320 flush_journal_list(sb
, other_jl
, 0);
1322 /* other_jl is now deleted from the list */
static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}
/*
 * flush a journal list, both commit and real blocks
 *
 * always set flushall to 1, unless you are calling from inside
 * flush_journal_list
 *
 * IMPORTANT.  This can only be called while there are no journal writers,
 * and the journal is locked.  That means it can only be called from
 * do_journal_end, or by journal_release
 */
1348 static int flush_journal_list(struct super_block
*s
,
1349 struct reiserfs_journal_list
*jl
, int flushall
)
1351 struct reiserfs_journal_list
*pjl
;
1352 struct reiserfs_journal_cnode
*cn
, *last
;
1356 struct buffer_head
*saved_bh
;
1357 unsigned long j_len_saved
= jl
->j_len
;
1358 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1362 BUG_ON(j_len_saved
<= 0);
1364 if (atomic_read(&journal
->j_wcount
) != 0) {
1365 reiserfs_warning(s
, "clm-2048", "called with wcount %d",
1366 atomic_read(&journal
->j_wcount
));
1369 /* if flushall == 0, the lock is already held */
1371 reiserfs_mutex_lock_safe(&journal
->j_flush_mutex
, s
);
1372 } else if (mutex_trylock(&journal
->j_flush_mutex
)) {
1377 if (j_len_saved
> journal
->j_trans_max
) {
1378 reiserfs_panic(s
, "journal-715", "length is %lu, trans id %lu",
1379 j_len_saved
, jl
->j_trans_id
);
1383 /* if all the work is already done, get out of here */
1384 if (atomic_read(&jl
->j_nonzerolen
) <= 0 &&
1385 atomic_read(&jl
->j_commit_left
) <= 0) {
1386 goto flush_older_and_return
;
	/*
	 * start by putting the commit list on disk.  This will also flush
	 * the commit lists of any older transactions
	 */
1393 flush_commit_list(s
, jl
, 1);
1395 if (!(jl
->j_state
& LIST_DIRTY
)
1396 && !reiserfs_is_journal_aborted(journal
))
1399 /* are we done now? */
1400 if (atomic_read(&jl
->j_nonzerolen
) <= 0 &&
1401 atomic_read(&jl
->j_commit_left
) <= 0) {
1402 goto flush_older_and_return
;
1406 * loop through each cnode, see if we need to write it,
1407 * or wait on a more recent transaction, or just ignore it
1409 if (atomic_read(&journal
->j_wcount
) != 0) {
1410 reiserfs_panic(s
, "journal-844", "journal list is flushing, "
1413 cn
= jl
->j_realblock
;
1418 /* blocknr of 0 is no longer in the hash, ignore it */
1419 if (cn
->blocknr
== 0) {
1424 * This transaction failed commit.
1425 * Don't write out to the disk
1427 if (!(jl
->j_state
& LIST_DIRTY
))
1430 pjl
= find_newer_jl_for_cn(cn
);
1432 * the order is important here. We check pjl to make sure we
1433 * don't clear BH_JDirty_wait if we aren't the one writing this
1436 if (!pjl
&& cn
->bh
) {
1440 * we do this to make sure nobody releases the
1441 * buffer while we are working with it
1445 if (buffer_journal_dirty(saved_bh
)) {
1446 BUG_ON(!can_dirty(cn
));
1449 } else if (can_dirty(cn
)) {
1451 * everything with !pjl && jwait
1452 * should be writable
1459 * if someone has this block in a newer transaction, just make
1460 * sure they are committed, and don't try writing it to disk
1463 if (atomic_read(&pjl
->j_commit_left
))
1464 flush_commit_list(s
, pjl
, 1);
1469 * bh == NULL when the block got to disk on its own, OR,
1470 * the block got freed in a future transaction
1472 if (saved_bh
== NULL
) {
1477 * this should never happen. kupdate_one_transaction has
1478 * this list locked while it works, so we should never see a
1479 * buffer here that is not marked JDirty_wait
1481 if ((!was_jwait
) && !buffer_locked(saved_bh
)) {
1482 reiserfs_warning(s
, "journal-813",
1483 "BAD! buffer %llu %cdirty %cjwait, "
1484 "not in a newer tranasction",
1485 (unsigned long long)saved_bh
->
1486 b_blocknr
, was_dirty
? ' ' : '!',
1487 was_jwait
? ' ' : '!');
1491 * we inc again because saved_bh gets decremented
1495 set_bit(BLOCK_NEEDS_FLUSH
, &cn
->state
);
1496 lock_buffer(saved_bh
);
1497 BUG_ON(cn
->blocknr
!= saved_bh
->b_blocknr
);
1498 if (buffer_dirty(saved_bh
))
1499 submit_logged_buffer(saved_bh
);
1501 unlock_buffer(saved_bh
);
1504 reiserfs_warning(s
, "clm-2082",
1505 "Unable to flush buffer %llu in %s",
1506 (unsigned long long)saved_bh
->
1507 b_blocknr
, __func__
);
1514 * we incremented this to keep others from
1515 * taking the buffer head away
1518 if (atomic_read(&saved_bh
->b_count
) < 0) {
1519 reiserfs_warning(s
, "journal-945",
1520 "saved_bh->b_count < 0");
1525 cn
= jl
->j_realblock
;
1527 if (test_bit(BLOCK_NEEDS_FLUSH
, &cn
->state
)) {
1529 reiserfs_panic(s
, "journal-1011",
1533 depth
= reiserfs_write_unlock_nested(s
);
1534 __wait_on_buffer(cn
->bh
);
1535 reiserfs_write_lock_nested(s
, depth
);
1538 reiserfs_panic(s
, "journal-1012",
1541 if (unlikely(!buffer_uptodate(cn
->bh
))) {
1542 #ifdef CONFIG_REISERFS_CHECK
1543 reiserfs_warning(s
, "journal-949",
1544 "buffer write failed");
1549 * note, we must clear the JDirty_wait bit
1550 * after the up to date check, otherwise we
1551 * race against our flushpage routine
1553 BUG_ON(!test_clear_buffer_journal_dirty
1556 /* drop one ref for us */
1558 /* drop one ref for journal_mark_dirty */
1559 release_buffer_page(cn
->bh
);
1566 reiserfs_abort(s
, -EIO
,
1567 "Write error while pushing transaction to disk in %s",
1569 flush_older_and_return
:
1572 * before we can update the journal header block, we _must_ flush all
1573 * real blocks from all older transactions to disk. This is because
1574 * once the header block is updated, this transaction will not be
1575 * replayed after a crash
1578 flush_older_journal_lists(s
, jl
);
1581 err
= journal
->j_errno
;
1583 * before we can remove everything from the hash tables for this
1584 * transaction, we must make sure it can never be replayed
1586 * since we are only called from do_journal_end, we know for sure there
1587 * are no allocations going on while we are flushing journal lists. So,
1588 * we only need to update the journal header block for the last list
1591 if (!err
&& flushall
) {
1593 update_journal_header_block(s
,
1594 (jl
->j_start
+ jl
->j_len
+
1595 2) % SB_ONDISK_JOURNAL_SIZE(s
),
1598 reiserfs_abort(s
, -EIO
,
1599 "Write error while updating journal header in %s",
1602 remove_all_from_journal_list(s
, jl
, 0);
1603 list_del_init(&jl
->j_list
);
1604 journal
->j_num_lists
--;
1605 del_from_work_list(s
, jl
);
1607 if (journal
->j_last_flush_id
!= 0 &&
1608 (jl
->j_trans_id
- journal
->j_last_flush_id
) != 1) {
1609 reiserfs_warning(s
, "clm-2201", "last flush %lu, current %lu",
1610 journal
->j_last_flush_id
, jl
->j_trans_id
);
1612 journal
->j_last_flush_id
= jl
->j_trans_id
;
1615 * not strictly required since we are freeing the list, but it should
1616 * help find code using dead lists later on
1619 atomic_set(&jl
->j_nonzerolen
, 0);
1621 jl
->j_realblock
= NULL
;
1622 jl
->j_commit_bh
= NULL
;
1625 put_journal_list(s
, jl
);
1627 mutex_unlock(&journal
->j_flush_mutex
);
1631 static int write_one_transaction(struct super_block
*s
,
1632 struct reiserfs_journal_list
*jl
,
1633 struct buffer_chunk
*chunk
)
1635 struct reiserfs_journal_cnode
*cn
;
1638 jl
->j_state
|= LIST_TOUCHED
;
1639 del_from_work_list(s
, jl
);
1640 if (jl
->j_len
== 0 || atomic_read(&jl
->j_nonzerolen
) == 0) {
1644 cn
= jl
->j_realblock
;
1647 * if the blocknr == 0, this has been cleared from the hash,
1650 if (cn
->blocknr
== 0) {
1653 if (cn
->bh
&& can_dirty(cn
) && buffer_dirty(cn
->bh
)) {
1654 struct buffer_head
*tmp_bh
;
1656 * we can race against journal_mark_freed when we try
1657 * to lock_buffer(cn->bh), so we have to inc the buffer
1658 * count, and recheck things after locking
1662 lock_buffer(tmp_bh
);
1663 if (cn
->bh
&& can_dirty(cn
) && buffer_dirty(tmp_bh
)) {
1664 if (!buffer_journal_dirty(tmp_bh
) ||
1665 buffer_journal_prepared(tmp_bh
))
1667 add_to_chunk(chunk
, tmp_bh
, NULL
, write_chunk
);
1670 /* note, cn->bh might be null now */
1671 unlock_buffer(tmp_bh
);
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/*
		 * look for a more recent transaction that logged this
		 * buffer.  Only the most recent transaction with a buffer in
		 * it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/*
			 * if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}
1720 static int kupdate_transactions(struct super_block
*s
,
1721 struct reiserfs_journal_list
*jl
,
1722 struct reiserfs_journal_list
**next_jl
,
1723 unsigned int *next_trans_id
,
1724 int num_blocks
, int num_trans
)
1728 int transactions_flushed
= 0;
1729 unsigned int orig_trans_id
= jl
->j_trans_id
;
1730 struct buffer_chunk chunk
;
1731 struct list_head
*entry
;
1732 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1735 reiserfs_mutex_lock_safe(&journal
->j_flush_mutex
, s
);
1736 if (!journal_list_still_alive(s
, orig_trans_id
)) {
1741 * we've got j_flush_mutex held, nobody is going to delete any
1742 * of these lists out from underneath us
1744 while ((num_trans
&& transactions_flushed
< num_trans
) ||
1745 (!num_trans
&& written
< num_blocks
)) {
1747 if (jl
->j_len
== 0 || (jl
->j_state
& LIST_TOUCHED
) ||
1748 atomic_read(&jl
->j_commit_left
)
1749 || !(jl
->j_state
& LIST_DIRTY
)) {
1750 del_from_work_list(s
, jl
);
1753 ret
= write_one_transaction(s
, jl
, &chunk
);
1757 transactions_flushed
++;
1759 entry
= jl
->j_list
.next
;
1762 if (entry
== &journal
->j_journal_list
) {
1765 jl
= JOURNAL_LIST_ENTRY(entry
);
1767 /* don't bother with older transactions */
1768 if (jl
->j_trans_id
<= orig_trans_id
)
1772 write_chunk(&chunk
);
1776 mutex_unlock(&journal
->j_flush_mutex
);
/*
 * for o_sync and fsync heavy applications, they tend to use
 * all the journal list slots with tiny transactions.  These
 * trigger lots and lots of calls to update the header block, which
 * adds seeks and slows things down.
 *
 * This function tries to clear out a large chunk of the journal lists
 * at once, which makes everything faster since only the newest journal
 * list updates the header block
 */
1790 static int flush_used_journal_lists(struct super_block
*s
,
1791 struct reiserfs_journal_list
*jl
)
1793 unsigned long len
= 0;
1794 unsigned long cur_len
;
1798 struct reiserfs_journal_list
*tjl
;
1799 struct reiserfs_journal_list
*flush_jl
;
1800 unsigned int trans_id
;
1801 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1803 flush_jl
= tjl
= jl
;
1805 /* in data logging mode, try harder to flush a lot of blocks */
1806 if (reiserfs_data_log(s
))
1808 /* flush for 256 transactions or limit blocks, whichever comes first */
1809 for (i
= 0; i
< 256 && len
< limit
; i
++) {
1810 if (atomic_read(&tjl
->j_commit_left
) ||
1811 tjl
->j_trans_id
< jl
->j_trans_id
) {
1814 cur_len
= atomic_read(&tjl
->j_nonzerolen
);
1816 tjl
->j_state
&= ~LIST_TOUCHED
;
1820 if (tjl
->j_list
.next
== &journal
->j_journal_list
)
1822 tjl
= JOURNAL_LIST_ENTRY(tjl
->j_list
.next
);
1824 get_journal_list(jl
);
1825 get_journal_list(flush_jl
);
1828 * try to find a group of blocks we can flush across all the
1829 * transactions, but only bother if we've actually spanned
1830 * across multiple lists
1832 if (flush_jl
!= jl
) {
1833 ret
= kupdate_transactions(s
, jl
, &tjl
, &trans_id
, len
, i
);
1835 flush_journal_list(s
, flush_jl
, 1);
1836 put_journal_list(s
, flush_jl
);
1837 put_journal_list(s
, jl
);
/*
 * removes any nodes in table with name block and dev as bh.
 * only touches the hnext and hprev pointers.
 */
1845 void remove_journal_hash(struct super_block
*sb
,
1846 struct reiserfs_journal_cnode
**table
,
1847 struct reiserfs_journal_list
*jl
,
1848 unsigned long block
, int remove_freed
)
1850 struct reiserfs_journal_cnode
*cur
;
1851 struct reiserfs_journal_cnode
**head
;
1853 head
= &(journal_hash(table
, sb
, block
));
1859 if (cur
->blocknr
== block
&& cur
->sb
== sb
1860 && (jl
== NULL
|| jl
== cur
->jlist
)
1861 && (!test_bit(BLOCK_FREED
, &cur
->state
) || remove_freed
)) {
1863 cur
->hnext
->hprev
= cur
->hprev
;
1866 cur
->hprev
->hnext
= cur
->hnext
;
1874 * anybody who clears the cur->bh will also
1875 * dec the nonzerolen
1877 if (cur
->bh
&& cur
->jlist
)
1878 atomic_dec(&cur
->jlist
->j_nonzerolen
);
1886 static void free_journal_ram(struct super_block
*sb
)
1888 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
1889 kfree(journal
->j_current_jl
);
1890 journal
->j_num_lists
--;
1892 vfree(journal
->j_cnode_free_orig
);
1893 free_list_bitmaps(sb
, journal
->j_list_bitmap
);
1894 free_bitmap_nodes(sb
); /* must be after free_list_bitmaps */
1895 if (journal
->j_header_bh
) {
1896 brelse(journal
->j_header_bh
);
1899 * j_header_bh is on the journal dev, make sure
1900 * not to release the journal dev until we brelse j_header_bh
1902 release_journal_dev(sb
, journal
);
/*
 * call on unmount.  Only set error to 1 if you haven't made your way out
 * of read_super() yet.  Any other caller must keep error at 0.
 */
1910 static int do_journal_release(struct reiserfs_transaction_handle
*th
,
1911 struct super_block
*sb
, int error
)
1913 struct reiserfs_transaction_handle myth
;
1915 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
1918 * we only want to flush out transactions if we were
1919 * called with error == 0
1921 if (!error
&& !(sb
->s_flags
& MS_RDONLY
)) {
1922 /* end the current trans */
1923 BUG_ON(!th
->t_trans_id
);
1924 do_journal_end(th
, FLUSH_ALL
);
1927 * make sure something gets logged to force
1928 * our way into the flush code
1930 if (!journal_join(&myth
, sb
)) {
1931 reiserfs_prepare_for_journal(sb
,
1932 SB_BUFFER_WITH_SB(sb
),
1934 journal_mark_dirty(&myth
, SB_BUFFER_WITH_SB(sb
));
1935 do_journal_end(&myth
, FLUSH_ALL
);
1940 /* this also catches errors during the do_journal_end above */
1941 if (!error
&& reiserfs_is_journal_aborted(journal
)) {
1942 memset(&myth
, 0, sizeof(myth
));
1943 if (!journal_join_abort(&myth
, sb
)) {
1944 reiserfs_prepare_for_journal(sb
,
1945 SB_BUFFER_WITH_SB(sb
),
1947 journal_mark_dirty(&myth
, SB_BUFFER_WITH_SB(sb
));
1948 do_journal_end(&myth
, FLUSH_ALL
);
1954 * We must release the write lock here because
1955 * the workqueue job (flush_async_commit) needs this lock
1957 reiserfs_write_unlock(sb
);
1960 * Cancel flushing of old commits. Note that neither of these works
1961 * will be requeued because superblock is being shutdown and doesn't
1962 * have MS_ACTIVE set.
1964 cancel_delayed_work_sync(&REISERFS_SB(sb
)->old_work
);
1965 /* wait for all commits to finish */
1966 cancel_delayed_work_sync(&SB_JOURNAL(sb
)->j_work
);
1968 free_journal_ram(sb
);
1970 reiserfs_write_lock(sb
);
/* call on unmount. flush all journal trans, release all alloc'd ram */
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *sb)
{
	return do_journal_release(th, sb, 0);
}
/* only call from an error condition inside reiserfs_read_super! */
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *sb)
{
	return do_journal_release(th, sb, 1);
}
/*
 * compares description block with commit block.
 * returns 1 if they differ, 0 if they are the same
 */
static int journal_compare_desc_commit(struct super_block *sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}
/*
 * returns 0 if it did not find a description block
 * returns -1 if it found a corrupt commit block
 * returns 1 if both desc and commit were valid
 * NOTE: only called during fs mount
 */
2012 static int journal_transaction_is_valid(struct super_block
*sb
,
2013 struct buffer_head
*d_bh
,
2014 unsigned int *oldest_invalid_trans_id
,
2015 unsigned long *newest_mount_id
)
2017 struct reiserfs_journal_desc
*desc
;
2018 struct reiserfs_journal_commit
*commit
;
2019 struct buffer_head
*c_bh
;
2020 unsigned long offset
;
2025 desc
= (struct reiserfs_journal_desc
*)d_bh
->b_data
;
2026 if (get_desc_trans_len(desc
) > 0
2027 && !memcmp(get_journal_desc_magic(d_bh
), JOURNAL_DESC_MAGIC
, 8)) {
2028 if (oldest_invalid_trans_id
&& *oldest_invalid_trans_id
2029 && get_desc_trans_id(desc
) > *oldest_invalid_trans_id
) {
2030 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
,
2031 "journal-986: transaction "
2032 "is valid returning because trans_id %d is greater than "
2033 "oldest_invalid %lu",
2034 get_desc_trans_id(desc
),
2035 *oldest_invalid_trans_id
);
2039 && *newest_mount_id
> get_desc_mount_id(desc
)) {
2040 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
,
2041 "journal-1087: transaction "
2042 "is valid returning because mount_id %d is less than "
2043 "newest_mount_id %lu",
2044 get_desc_mount_id(desc
),
2048 if (get_desc_trans_len(desc
) > SB_JOURNAL(sb
)->j_trans_max
) {
2049 reiserfs_warning(sb
, "journal-2018",
2050 "Bad transaction length %d "
2051 "encountered, ignoring transaction",
2052 get_desc_trans_len(desc
));
2055 offset
= d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(sb
);
2058 * ok, we have a journal description block,
2059 * let's see if the transaction was valid
2063 SB_ONDISK_JOURNAL_1st_BLOCK(sb
) +
2064 ((offset
+ get_desc_trans_len(desc
) +
2065 1) % SB_ONDISK_JOURNAL_SIZE(sb
)));
2068 commit
= (struct reiserfs_journal_commit
*)c_bh
->b_data
;
2069 if (journal_compare_desc_commit(sb
, desc
, commit
)) {
2070 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
,
2071 "journal_transaction_is_valid, commit offset %ld had bad "
2072 "time %d or length %d",
2074 SB_ONDISK_JOURNAL_1st_BLOCK(sb
),
2075 get_commit_trans_id(commit
),
2076 get_commit_trans_len(commit
));
2078 if (oldest_invalid_trans_id
) {
2079 *oldest_invalid_trans_id
=
2080 get_desc_trans_id(desc
);
2081 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
,
2083 "transaction_is_valid setting oldest invalid trans_id "
2085 get_desc_trans_id(desc
));
2090 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
,
2091 "journal-1006: found valid "
2092 "transaction start offset %llu, len %d id %d",
2094 SB_ONDISK_JOURNAL_1st_BLOCK(sb
),
2095 get_desc_trans_len(desc
),
2096 get_desc_trans_id(desc
));
static void brelse_array(struct buffer_head **heads, int num)
{
	int i;
	for (i = 0; i < num; i++) {
		brelse(heads[i]);
	}
}
/*
 * given the start, and values for the oldest acceptable transactions,
 * this either reads in and replays a transaction, or returns because the
 * transaction is invalid, or too old.
 * NOTE: only called during fs mount
 */
2117 static int journal_read_transaction(struct super_block
*sb
,
2118 unsigned long cur_dblock
,
2119 unsigned long oldest_start
,
2120 unsigned int oldest_trans_id
,
2121 unsigned long newest_mount_id
)
2123 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
2124 struct reiserfs_journal_desc
*desc
;
2125 struct reiserfs_journal_commit
*commit
;
2126 unsigned int trans_id
= 0;
2127 struct buffer_head
*c_bh
;
2128 struct buffer_head
*d_bh
;
2129 struct buffer_head
**log_blocks
= NULL
;
2130 struct buffer_head
**real_blocks
= NULL
;
2131 unsigned int trans_offset
;
2135 d_bh
= journal_bread(sb
, cur_dblock
);
2138 desc
= (struct reiserfs_journal_desc
*)d_bh
->b_data
;
2139 trans_offset
= d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(sb
);
2140 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
, "journal-1037: "
2141 "journal_read_transaction, offset %llu, len %d mount_id %d",
2142 d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(sb
),
2143 get_desc_trans_len(desc
), get_desc_mount_id(desc
));
2144 if (get_desc_trans_id(desc
) < oldest_trans_id
) {
2145 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
, "journal-1039: "
2146 "journal_read_trans skipping because %lu is too old",
2148 SB_ONDISK_JOURNAL_1st_BLOCK(sb
));
2152 if (get_desc_mount_id(desc
) != newest_mount_id
) {
2153 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
, "journal-1146: "
2154 "journal_read_trans skipping because %d is != "
2155 "newest_mount_id %lu", get_desc_mount_id(desc
),
2160 c_bh
= journal_bread(sb
, SB_ONDISK_JOURNAL_1st_BLOCK(sb
) +
2161 ((trans_offset
+ get_desc_trans_len(desc
) + 1) %
2162 SB_ONDISK_JOURNAL_SIZE(sb
)));
2167 commit
= (struct reiserfs_journal_commit
*)c_bh
->b_data
;
	if (journal_compare_desc_commit(sb, desc, commit)) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal_read_transaction, "
			       "commit offset %llu had bad time %d or length %d",
			       c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_commit_trans_id(commit),
			       get_commit_trans_len(commit));
		brelse(c_bh);
		brelse(d_bh);
		return 1;
	}

	if (bdev_read_only(sb->s_bdev)) {
		reiserfs_warning(sb, "clm-2076",
				 "device is readonly, unable to replay log");
		brelse(c_bh);
		brelse(d_bh);
		return -EROFS;
	}

	trans_id = get_desc_trans_id(desc);
	/*
	 * now we know we've got a good transaction, and it was
	 * inside the valid time ranges
	 */
	log_blocks = kmalloc(get_desc_trans_len(desc) *
			     sizeof(struct buffer_head *), GFP_NOFS);
	real_blocks = kmalloc(get_desc_trans_len(desc) *
			      sizeof(struct buffer_head *), GFP_NOFS);
	if (!log_blocks || !real_blocks) {
		kfree(log_blocks);
		kfree(real_blocks);
		reiserfs_warning(sb, "journal-1169",
				 "kmalloc failed, unable to mount FS");
		return -1;
	}
	/* get all the buffer heads */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		log_blocks[i] =
		    journal_getblk(sb,
				   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				   (trans_offset + 1 +
				    i) % SB_ONDISK_JOURNAL_SIZE(sb));
		if (i < trans_half) {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(desc->j_realblock[i]));
		} else {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(commit->
						  j_realblock[i - trans_half]));
		}
		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
			reiserfs_warning(sb, "journal-1207",
					 "REPLAY FAILURE fsck required! "
					 "Block to replay is outside of "
					 "filesystem");
			goto abort_replay;
		}
		/* make sure we don't try to replay onto log or reserved area */
		if (is_block_in_log_or_reserved_area
		    (sb, real_blocks[i]->b_blocknr)) {
			reiserfs_warning(sb, "journal-1204",
					 "REPLAY FAILURE fsck required! "
					 "Trying to replay onto a log block");
abort_replay:
			brelse_array(log_blocks, i);
			brelse_array(real_blocks, i);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
	}
	/* read in the log blocks, memcpy to the corresponding real block */
	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(log_blocks[i]);
		if (!buffer_uptodate(log_blocks[i])) {
			reiserfs_warning(sb, "journal-1212",
					 "REPLAY FAILURE fsck required! "
					 "buffer write failed");
			brelse_array(log_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse_array(real_blocks, get_desc_trans_len(desc));
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
		       real_blocks[i]->b_size);
		set_buffer_uptodate(real_blocks[i]);
		brelse(log_blocks[i]);
	}
	/* flush out the real blocks */
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		set_buffer_dirty(real_blocks[i]);
		write_dirty_buffer(real_blocks[i], WRITE);
	}
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(real_blocks[i]);
		if (!buffer_uptodate(real_blocks[i])) {
			reiserfs_warning(sb, "journal-1226",
					 "REPLAY FAILURE, fsck required! "
					 "buffer write failed");
			brelse_array(real_blocks + i,
				     get_desc_trans_len(desc) - i);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		brelse(real_blocks[i]);
	}
	cur_dblock =
	    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((trans_offset + get_desc_trans_len(desc) +
	      2) % SB_ONDISK_JOURNAL_SIZE(sb));
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "journal-1095: setting journal start to offset %ld",
		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));

	/*
	 * init starting values for the first transaction, in case
	 * this is the last transaction to be replayed.
	 */
	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	journal->j_last_flush_trans_id = trans_id;
	journal->j_trans_id = trans_id + 1;
	/* check for trans_id overflow */
	if (journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	kfree(log_blocks);
	kfree(real_blocks);
	return 0;
}
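
/*
 * Illustrative sketch (not from the original source): the "+ 2" wrap
 * arithmetic used above when stepping past a replayed transaction.  A
 * transaction occupies one descriptor block, get_desc_trans_len() data
 * blocks, and one commit block, so the next transaction starts trans_len + 2
 * blocks further on, modulo the on-disk journal size.  The helper name below
 * is hypothetical and only meant to show the math.
 */
#if 0
static unsigned long example_next_trans_offset(struct super_block *sb,
					       unsigned long trans_offset,
					       unsigned long trans_len)
{
	/* desc block + data blocks + commit block, wrapped around the log */
	return (trans_offset + trans_len + 2) % SB_ONDISK_JOURNAL_SIZE(sb);
}
#endif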
/*
 * This function reads blocks starting from block and to max_block of bufsize
 * size (but no more than BUFNR blocks at a time). This proved to improve
 * mounting speed on self-rebuilding raid5 arrays at least.
 * Right now it is only used from journal code. But later we might use it
 * from other places.
 * Note: Do not use journal_getblk/sb_getblk functions here!
 */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
					   b_blocknr_t block, int bufsize,
					   b_blocknr_t max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return bh;

	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	ll_rw_block(READ, j, bhlist);
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
/*
 * read and replay the log
 * on a clean unmount, the journal header's next unflushed pointer will be
 * to an invalid transaction. This tests that before finding all the
 * transactions in the log, which makes normal mount times fast.
 *
 * After a crash, this starts with the next unflushed transaction, and
 * replays until it finds one too old, or invalid.
 *
 * On exit, it sets things up so the first transaction will work correctly.
 * NOTE: only called during fs mount
 */
static int journal_read(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	unsigned int oldest_trans_id = 0;
	unsigned int oldest_invalid_trans_id = 0;
	time_t start;
	unsigned long oldest_start = 0;
	unsigned long cur_dblock = 0;
	unsigned long newest_mount_id = 9;
	struct buffer_head *d_bh;
	struct reiserfs_journal_header *jh;
	int valid_journal_header = 0;
	int replay_count = 0;
	int continue_replay = 1;
	int ret;
	char b[BDEVNAME_SIZE];

	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_info(sb, "checking transaction log (%s)\n",
		      bdevname(journal->j_dev_bd, b));
	start = get_seconds();

	/*
	 * step 1, read in the journal header block. Check the transaction
	 * it says is the first unflushed, and if that transaction is not
	 * valid, replay is done
	 */
	journal->j_header_bh = journal_bread(sb,
					     SB_ONDISK_JOURNAL_1st_BLOCK(sb)
					     + SB_ONDISK_JOURNAL_SIZE(sb));
	if (!journal->j_header_bh) {
		return 1;
	}
	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
	if (le32_to_cpu(jh->j_first_unflushed_offset) <
	    SB_ONDISK_JOURNAL_SIZE(sb)
	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
		oldest_start =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		    le32_to_cpu(jh->j_first_unflushed_offset);
		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		newest_mount_id = le32_to_cpu(jh->j_mount_id);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1153: found in "
			       "header: first_unflushed_offset %d, last_flushed_trans_id "
			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
			       le32_to_cpu(jh->j_last_flush_trans_id));
		valid_journal_header = 1;

		/*
		 * now, we try to read the first unflushed offset. If it
		 * is not valid, there is nothing more we can do, and it
		 * makes no sense to read through the whole log.
		 */
		d_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  le32_to_cpu(jh->j_first_unflushed_offset));
		ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
		if (!ret) {
			continue_replay = 0;
		}
		brelse(d_bh);
		goto start_log_replay;
	}

	/*
	 * ok, there are transactions that need to be replayed. start
	 * with the first log block, find all the valid transactions, and
	 * pick out the oldest.
	 */
	while (continue_replay
	       && cur_dblock <
	       (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		SB_ONDISK_JOURNAL_SIZE(sb))) {
		/*
		 * Note that it is required for blocksize of primary fs
		 * device and journal device to be the same
		 */
		d_bh =
		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
				    sb->s_blocksize,
				    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				    SB_ONDISK_JOURNAL_SIZE(sb));
		ret =
		    journal_transaction_is_valid(sb, d_bh,
						 &oldest_invalid_trans_id,
						 &newest_mount_id);
		if (ret == 1) {
			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
			if (oldest_start == 0) {	/* init all oldest_ values */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1179: Setting "
					       "oldest_start to offset %llu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
				/* one we just read was older */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1180: Resetting "
					       "oldest_start to offset %lu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			}
			if (newest_mount_id < get_desc_mount_id(desc)) {
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1299: Setting "
					       "newest_mount_id to %d",
					       get_desc_mount_id(desc));
			}
			cur_dblock += get_desc_trans_len(desc) + 2;
		} else {
			cur_dblock++;
		}
		brelse(d_bh);
	}

start_log_replay:
	cur_dblock = oldest_start;
	if (oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1206: Starting replay "
			       "from offset %llu, trans_id %lu",
			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       oldest_trans_id);
	}
	replay_count = 0;
	while (continue_replay && oldest_trans_id > 0) {
		ret =
		    journal_read_transaction(sb, cur_dblock, oldest_start,
					     oldest_trans_id, newest_mount_id);
		if (ret < 0) {
			return ret;
		} else if (ret != 0) {
			break;
		}
		cur_dblock =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
		replay_count++;
		if (cur_dblock == oldest_start)
			break;
	}

	if (oldest_trans_id == 0) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1225: No valid " "transactions found");
	}
	/*
	 * j_start does not get set correctly if we don't replay any
	 * transactions. if we had a valid journal_header, set j_start
	 * to the first unflushed transaction value, copy the trans_id
	 * from the header
	 */
	if (valid_journal_header && replay_count == 0) {
		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
		journal->j_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		/* check for trans_id overflow */
		if (journal->j_trans_id == 0)
			journal->j_trans_id = 10;
		journal->j_last_flush_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id);
		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
	} else {
		journal->j_mount_id = newest_mount_id + 1;
	}
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
		       "newest_mount_id to %lu", journal->j_mount_id);
	journal->j_first_unflushed_offset = journal->j_start;
	if (replay_count > 0) {
		reiserfs_info(sb,
			      "replayed %d transactions in %lu seconds\n",
			      replay_count, get_seconds() - start);
	}
	/* needed to satisfy the locking in _update_journal_header_block */
	reiserfs_write_lock(sb);
	if (!bdev_read_only(sb->s_bdev) &&
	    _update_journal_header_block(sb, journal->j_start,
					 journal->j_last_flush_trans_id)) {
		reiserfs_write_unlock(sb);
		/*
		 * replay failed, caller must call free_journal_ram and abort
		 * the mount
		 */
		return -1;
	}
	reiserfs_write_unlock(sb);
	return 0;
}
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
	struct reiserfs_journal_list *jl;
	jl = kzalloc(sizeof(struct reiserfs_journal_list),
		     GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&jl->j_list);
	INIT_LIST_HEAD(&jl->j_working_list);
	INIT_LIST_HEAD(&jl->j_tail_bh_list);
	INIT_LIST_HEAD(&jl->j_bh_list);
	mutex_init(&jl->j_commit_mutex);
	SB_JOURNAL(s)->j_num_lists++;
	get_journal_list(jl);
	return jl;
}

static void journal_list_init(struct super_block *sb)
{
	SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}
static void release_journal_dev(struct super_block *super,
				struct reiserfs_journal *journal)
{
	if (journal->j_dev_bd != NULL) {
		blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
		journal->j_dev_bd = NULL;
	}
}
static int journal_init_dev(struct super_block *super,
			    struct reiserfs_journal *journal,
			    const char *jdev_name)
{
	int result;
	dev_t jdev;
	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	char b[BDEVNAME_SIZE];

	result = 0;

	journal->j_dev_bd = NULL;
	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

	if (bdev_read_only(super->s_bdev))
		blkdev_mode = FMODE_READ;

	/* there is no "jdev" option and journal is on separate device */
	if ((!jdev_name || !jdev_name[0])) {
		if (jdev == super->s_dev)
			blkdev_mode &= ~FMODE_EXCL;
		journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
						      journal);
		journal->j_dev_mode = blkdev_mode;
		if (IS_ERR(journal->j_dev_bd)) {
			result = PTR_ERR(journal->j_dev_bd);
			journal->j_dev_bd = NULL;
			reiserfs_warning(super, "sh-458",
					 "cannot init journal device '%s': %i",
					 __bdevname(jdev, b), result);
			return result;
		} else if (jdev != super->s_dev)
			set_blocksize(journal->j_dev_bd, super->s_blocksize);

		return 0;
	}

	journal->j_dev_mode = blkdev_mode;
	journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
	if (IS_ERR(journal->j_dev_bd)) {
		result = PTR_ERR(journal->j_dev_bd);
		journal->j_dev_bd = NULL;
		reiserfs_warning(super,
				 "journal_init_dev: Cannot open '%s': %i",
				 jdev_name, result);
		return result;
	}

	set_blocksize(journal->j_dev_bd, super->s_blocksize);
	reiserfs_info(super,
		      "journal_init_dev: journal device: %s\n",
		      bdevname(journal->j_dev_bd, b));
	return 0;
}
/*
 * When creating/tuning a file system user can assign some
 * journal params within boundaries which depend on the ratio
 * blocksize/standard_blocksize.
 *
 * For blocks >= standard_blocksize transaction size should
 * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more
 * than JOURNAL_TRANS_MAX_DEFAULT.
 *
 * For blocks < standard_blocksize these boundaries should be
 * decreased proportionally.
 */
#define REISERFS_STANDARD_BLKSIZE (4096)

static int check_advise_trans_params(struct super_block *sb,
				     struct reiserfs_journal *journal)
{
	if (journal->j_trans_max) {
		/* Non-default journal params. Do sanity check for them. */
		int ratio = 1;
		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
			ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
		    journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
		    SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
		    JOURNAL_MIN_RATIO) {
			reiserfs_warning(sb, "sh-462",
					 "bad transaction max size (%u). "
					 "FSCK?", journal->j_trans_max);
			return 1;
		}
		if (journal->j_max_batch != (journal->j_trans_max) *
		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT) {
			reiserfs_warning(sb, "sh-463",
					 "bad transaction max batch (%u). "
					 "FSCK?", journal->j_max_batch);
			return 1;
		}
	} else {
		/*
		 * Default journal params.
		 * The file system was created by old version
		 * of mkreiserfs, so some fields contain zeros,
		 * and we need to advise proper values for them
		 */
		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
					 sb->s_blocksize);
			return 1;
		}
		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
	}
	return 0;
}
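
/*
 * Illustrative sketch (not from the original source): the bounds enforced by
 * check_advise_trans_params() for a hypothetical 1K-blocksize journal.  With
 * REISERFS_STANDARD_BLKSIZE of 4096 the ratio is 4, so j_trans_max must lie
 * between JOURNAL_TRANS_MIN_DEFAULT / 4 and JOURNAL_TRANS_MAX_DEFAULT / 4,
 * and j_max_batch must scale with it.  The helper name is hypothetical.
 */
#if 0
static int example_trans_params_ok(struct super_block *sb,
				   unsigned int trans_max,
				   unsigned int max_batch)
{
	int ratio = 1;

	if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
		ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

	if (trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
	    trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
		return 0;
	/* batch size is kept proportional to the transaction size */
	return max_batch == trans_max *
	       JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
}
#endif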
/* must be called once on fs mount. calls journal_read for you */
int journal_init(struct super_block *sb, const char *j_dev_name,
		 int old_format, unsigned int commit_max_age)
{
	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
	struct buffer_head *bhjh;
	struct reiserfs_super_block *rs;
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal;
	struct reiserfs_journal_list *jl;
	char b[BDEVNAME_SIZE];
	int ret;

	journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
	if (!journal) {
		reiserfs_warning(sb, "journal-1256",
				 "unable to get memory for journal structure");
		return 1;
	}
	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
	INIT_LIST_HEAD(&journal->j_prealloc_list);
	INIT_LIST_HEAD(&journal->j_working_list);
	INIT_LIST_HEAD(&journal->j_journal_list);
	journal->j_persistent_trans = 0;
	if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
					   reiserfs_bmap_count(sb)))
		goto free_and_return;

	allocate_bitmap_nodes(sb);

	/* reserved for journal area support */
	SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
					     REISERFS_OLD_DISK_OFFSET_IN_BYTES
					     / sb->s_blocksize +
					     reiserfs_bmap_count(sb) +
					     1 :
					     REISERFS_DISK_OFFSET_IN_BYTES /
					     sb->s_blocksize + 2);

	/*
	 * Sanity check to see if the standard journal fits
	 * within the first bitmap (actual for small blocksizes)
	 */
	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
	    (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
	     SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
		reiserfs_warning(sb, "journal-1393",
				 "journal does not fit for area addressed "
				 "by first of bitmap blocks. It starts at "
				 "%u and its size is %u. Block size %ld",
				 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
				 SB_ONDISK_JOURNAL_SIZE(sb),
				 sb->s_blocksize);
		goto free_and_return;
	}

	if (journal_init_dev(sb, journal, j_dev_name) != 0) {
		reiserfs_warning(sb, "sh-462",
				 "unable to initialize journal device");
		goto free_and_return;
	}

	rs = SB_DISK_SUPER_BLOCK(sb);

	/* read journal header */
	bhjh = journal_bread(sb,
			     SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     SB_ONDISK_JOURNAL_SIZE(sb));
	if (!bhjh) {
		reiserfs_warning(sb, "sh-459",
				 "unable to read journal header");
		goto free_and_return;
	}
	jh = (struct reiserfs_journal_header *)(bhjh->b_data);

	/* make sure that journal matches to the super block */
	if (is_reiserfs_jr(rs)
	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
		sb_jp_journal_magic(rs))) {
		reiserfs_warning(sb, "sh-460",
				 "journal header magic %x (device %s) does "
				 "not match to magic found in super block %x",
				 jh->jh_journal.jp_journal_magic,
				 bdevname(journal->j_dev_bd, b),
				 sb_jp_journal_magic(rs));
		brelse(bhjh);
		goto free_and_return;
	}

	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
	journal->j_max_commit_age =
	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;

	if (check_advise_trans_params(sb, journal) != 0)
		goto free_and_return;
	journal->j_default_max_commit_age = journal->j_max_commit_age;

	if (commit_max_age != 0) {
		journal->j_max_commit_age = commit_max_age;
		journal->j_max_trans_age = commit_max_age;
	}

	reiserfs_info(sb, "journal params: device %s, size %u, "
		      "journal first block %u, max trans len %u, max batch %u, "
		      "max commit age %u, max trans age %u\n",
		      bdevname(journal->j_dev_bd, b),
		      SB_ONDISK_JOURNAL_SIZE(sb),
		      SB_ONDISK_JOURNAL_1st_BLOCK(sb),
		      journal->j_trans_max,
		      journal->j_max_batch,
		      journal->j_max_commit_age, journal->j_max_trans_age);

	brelse(bhjh);

	journal->j_list_bitmap_index = 0;
	journal_list_init(sb);

	memset(journal->j_list_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));

	INIT_LIST_HEAD(&journal->j_dirty_buffers);
	spin_lock_init(&journal->j_dirty_buffers_lock);

	journal->j_start = 0;
	journal->j_len = 0;
	journal->j_len_alloc = 0;
	atomic_set(&journal->j_wcount, 0);
	atomic_set(&journal->j_async_throttle, 0);
	journal->j_bcount = 0;
	journal->j_trans_start_time = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	init_waitqueue_head(&journal->j_join_wait);
	mutex_init(&journal->j_mutex);
	mutex_init(&journal->j_flush_mutex);

	journal->j_trans_id = 10;
	journal->j_mount_id = 10;
	journal->j_state = 0;
	atomic_set(&journal->j_jlock, 0);
	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
	journal->j_cnode_free_orig = journal->j_cnode_free_list;
	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
	journal->j_cnode_used = 0;
	journal->j_must_wait = 0;

	if (journal->j_cnode_free == 0) {
		reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
				 "allocation failed (%ld bytes). Journal is "
				 "too large for available memory. Usually "
				 "this is due to a journal that is too large.",
				 sizeof(struct reiserfs_journal_cnode) * num_cnodes);
		goto free_and_return;
	}

	init_journal_hash(sb);
	jl = journal->j_current_jl;

	/*
	 * get_list_bitmap() may call flush_commit_list() which
	 * requires the lock. Calling flush_commit_list() shouldn't happen
	 * this early but I like to be paranoid.
	 */
	reiserfs_write_lock(sb);
	jl->j_list_bitmap = get_list_bitmap(sb, jl);
	reiserfs_write_unlock(sb);
	if (!jl->j_list_bitmap) {
		reiserfs_warning(sb, "journal-2005",
				 "get_list_bitmap failed for journal list 0");
		goto free_and_return;
	}

	ret = journal_read(sb);
	if (ret < 0) {
		reiserfs_warning(sb, "reiserfs-2006",
				 "Replay Failure, unable to mount");
		goto free_and_return;
	}

	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
	journal->j_work_sb = sb;
	return 0;
free_and_return:
	free_journal_ram(sb);
	return 1;
}
/*
 * test for a polite end of the current transaction. Used by file_write,
 * and should be used by delete to make sure they don't write more than
 * can fit inside a single transaction
 */
int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
				   int new_alloc)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	time_t now = get_seconds();
	/* cannot restart while nested */
	BUG_ON(!th->t_trans_id);
	if (th->t_refcount > 1)
		return 0;
	if (journal->j_must_wait > 0 ||
	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
	    atomic_read(&journal->j_jlock) ||
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
		return 1;
	}

	journal->j_len_alloc += new_alloc;
	th->t_blocks_allocated += new_alloc;
	return 0;
}
/* this must be called inside a transaction */
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	BUG_ON(!th->t_trans_id);
	journal->j_must_wait = 1;
	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
}

/* this must be called without a transaction started */
void reiserfs_allow_writes(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
	wake_up(&journal->j_join_wait);
}

/* this must be called without a transaction started */
void reiserfs_wait_on_write_block(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	wait_event(journal->j_join_wait,
		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
}
static void queue_log_writer(struct super_block *s)
{
	wait_queue_t wait;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	set_bit(J_WRITERS_QUEUED, &journal->j_state);

	/*
	 * we don't want to use wait_event here because
	 * we only want to wait once.
	 */
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&journal->j_join_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
		int depth = reiserfs_write_unlock_nested(s);
		schedule();
		reiserfs_write_lock_nested(s, depth);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&journal->j_join_wait, &wait);
}

static void wake_queued_writers(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
		wake_up(&journal->j_join_wait);
}
static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned long bcount = journal->j_bcount;
	while (1) {
		int depth;

		depth = reiserfs_write_unlock_nested(sb);
		schedule_timeout_uninterruptible(1);
		reiserfs_write_lock_nested(sb, depth);
		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
		while ((atomic_read(&journal->j_wcount) > 0 ||
			atomic_read(&journal->j_jlock)) &&
		       journal->j_trans_id == trans_id) {
			queue_log_writer(sb);
		}
		if (journal->j_trans_id != trans_id)
			break;
		if (bcount == journal->j_bcount)
			break;
		bcount = journal->j_bcount;
	}
}
/*
 * join == true if you must join an existing transaction.
 * join == false if you can deal with waiting for others to finish
 *
 * this will block until the transaction is joinable. send the number of
 * blocks you expect to use in nblocks.
 */
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, unsigned long nblocks,
			      int join)
{
	time_t now = get_seconds();
	unsigned int old_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_transaction_handle myth;
	int sched_count = 0;
	int retval;
	int depth;

	reiserfs_check_lock_depth(sb, "journal_begin");
	BUG_ON(nblocks > journal->j_trans_max);

	PROC_INFO_INC(sb, journal.journal_being);
	/* set here for journal_join */
	th->t_refcount = 1;
	th->t_super = sb;

relock:
	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
		retval = journal->j_errno;
		goto out_fail;
	}
	journal->j_bcount++;

	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
		depth = reiserfs_write_unlock_nested(sb);
		reiserfs_wait_on_write_block(sb);
		reiserfs_write_lock_nested(sb, depth);
		PROC_INFO_INC(sb, journal.journal_relock_writers);
		goto relock;
	}
	now = get_seconds();

	/*
	 * if there is no room in the journal OR
	 * if this transaction is too old, and we weren't called joinable,
	 * wait for it to finish before beginning we don't sleep if there
	 * aren't other writers
	 */
	if ((!join && journal->j_must_wait > 0) ||
	    (!join
	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
	    || (!join && atomic_read(&journal->j_wcount) > 0
		&& journal->j_trans_start_time > 0
		&& (now - journal->j_trans_start_time) >
		journal->j_max_trans_age) || (!join
					      && atomic_read(&journal->j_jlock))
	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

		old_trans_id = journal->j_trans_id;
		/* allow others to finish this transaction */

		if (!join && (journal->j_len_alloc + nblocks + 2) >=
		    journal->j_max_batch &&
		    ((journal->j_len + nblocks + 2) * 100) <
		    (journal->j_len_alloc * 75)) {
			if (atomic_read(&journal->j_wcount) > 10) {
				sched_count++;
				queue_log_writer(sb);
				goto relock;
			}
		}
		/*
		 * don't mess with joining the transaction if all we
		 * have to do is wait for someone else to do a commit
		 */
		if (atomic_read(&journal->j_jlock)) {
			while (journal->j_trans_id == old_trans_id &&
			       atomic_read(&journal->j_jlock)) {
				queue_log_writer(sb);
			}
			goto relock;
		}
		retval = journal_join(&myth, sb);
		if (retval)
			goto out_fail;

		/* someone might have ended the transaction while we joined */
		if (old_trans_id != journal->j_trans_id) {
			retval = do_journal_end(&myth, 0);
		} else {
			retval = do_journal_end(&myth, COMMIT_NOW);
		}
		if (retval)
			goto out_fail;

		PROC_INFO_INC(sb, journal.journal_relock_wcount);
		goto relock;
	}
	/* we are the first writer, set trans_id */
	if (journal->j_trans_start_time == 0) {
		journal->j_trans_start_time = get_seconds();
	}
	atomic_inc(&journal->j_wcount);
	journal->j_len_alloc += nblocks;
	th->t_blocks_logged = 0;
	th->t_blocks_allocated = nblocks;
	th->t_trans_id = journal->j_trans_id;
	INIT_LIST_HEAD(&th->t_list);
	return 0;

out_fail:
	memset(th, 0, sizeof(*th));
	/*
	 * Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later
	 */
	th->t_super = sb;
	return retval;
}
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
								    super_block
								    *s,
								    int nblocks)
{
	int ret;
	struct reiserfs_transaction_handle *th;

	/*
	 * if we're nesting into an existing transaction. It will be
	 * persistent on its own
	 */
	if (reiserfs_transaction_running(s)) {
		th = current->journal_info;
		th->t_refcount++;
		BUG_ON(th->t_refcount < 2);
		return th;
	}
	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
	if (!th)
		return NULL;
	ret = journal_begin(th, s, nblocks);
	if (ret) {
		kfree(th);
		return NULL;
	}

	SB_JOURNAL(s)->j_persistent_trans++;
	return th;
}
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	int ret = 0;

	ret = journal_end(th);
	if (th->t_refcount == 0) {
		SB_JOURNAL(s)->j_persistent_trans--;
		kfree(th);
	}
	return ret;
}
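
/*
 * Illustrative sketch (not from the original source): the intended pairing of
 * reiserfs_persistent_transaction() with
 * reiserfs_end_persistent_transaction().  A persistent handle survives across
 * calls because it is heap-allocated rather than stack-allocated.  The
 * function name and block count are hypothetical.
 */
#if 0
static int example_persistent_update(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(s, 1);
	if (!th)
		return -ENOMEM;

	/* ... journal_mark_dirty() calls against th would go here ... */

	return reiserfs_end_persistent_transaction(th);
}
#endif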
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/*
	 * this keeps do_journal_end from NULLing out the
	 * current->journal_info pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, 1, JBEGIN_JOIN);
}

int journal_join_abort(struct reiserfs_transaction_handle *th,
		       struct super_block *sb)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/*
	 * this keeps do_journal_end from NULLing out the
	 * current->journal_info pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, 1, JBEGIN_ABORT);
}
int journal_begin(struct reiserfs_transaction_handle *th,
		  struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;
	int ret;

	th->t_handle_save = NULL;
	if (cur_th) {
		/* we are nesting into the current transaction */
		if (cur_th->t_super == sb) {
			BUG_ON(!cur_th->t_refcount);
			cur_th->t_refcount++;
			memcpy(th, cur_th, sizeof(*th));
			if (th->t_refcount <= 1)
				reiserfs_warning(sb, "reiserfs-2005",
						 "BAD: refcount <= 1, but "
						 "journal_info != 0");
			return 0;
		} else {
			/*
			 * we've ended up with a handle from a different
			 * filesystem. save it and restore on journal_end.
			 * This should never really happen...
			 */
			reiserfs_warning(sb, "clm-2100",
					 "nesting info a different FS");
			th->t_handle_save = current->journal_info;
			current->journal_info = th;
		}
	} else {
		current->journal_info = th;
	}
	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
	BUG_ON(current->journal_info != th);

	/*
	 * I guess this boils down to being the reciprocal of clm-2100 above.
	 * If do_journal_begin_r fails, we need to put it back, since
	 * journal_end won't be called to do it.
	 */
	if (ret)
		current->journal_info = th->t_handle_save;
	else
		BUG_ON(!th->t_refcount);

	return ret;
}
/*
 * puts bh into the current transaction. If it was already there, it
 * reorders: removes the old pointers from the hash, and puts new ones in
 * (to make sure replay happens in the right order).
 *
 * if it was dirty, cleans and files onto the clean list. I can't let it
 * be dirty again until the transaction is committed.
 *
 * if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
 */
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct buffer_head *bh)
{
	struct super_block *sb = th->t_super;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	int count_already_incd = 0;
	int prepared = 0;

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(sb, journal.mark_dirty);
	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	prepared = test_clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
	/* already in this transaction, we are done */
	if (buffer_journaled(bh)) {
		PROC_INFO_INC(sb, journal.mark_dirty_already);
		return 0;
	}

	/*
	 * this must be turned into a panic instead of a warning. We can't
	 * allow a dirty or journal_dirty or locked buffer to be logged, as
	 * some changes could get to disk too early. NOT GOOD.
	 */
	if (!prepared || buffer_dirty(bh)) {
		reiserfs_warning(sb, "journal-1777",
				 "buffer %llu bad state "
				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
				 (unsigned long long)bh->b_blocknr,
				 prepared ? ' ' : '!',
				 buffer_locked(bh) ? ' ' : '!',
				 buffer_dirty(bh) ? ' ' : '!',
				 buffer_journal_dirty(bh) ? ' ' : '!');
	}

	if (atomic_read(&journal->j_wcount) <= 0) {
		reiserfs_warning(sb, "journal-1409",
				 "returning because j_wcount was %d",
				 atomic_read(&journal->j_wcount));
		return 1;
	}
	/*
	 * this error means I've screwed up, and we've overflowed
	 * the transaction. Nothing can be done here, except make the
	 * FS readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
		reiserfs_panic(th->t_super, "journal-1413",
			       "j_len (%lu) is too big",
			       journal->j_len);
	}

	if (buffer_journal_dirty(bh)) {
		count_already_incd = 1;
		PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
		clear_buffer_journal_dirty(bh);
	}

	if (journal->j_len > journal->j_len_alloc) {
		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
	}

	set_buffer_journaled(bh);

	/* now put this guy on the end */
	if (!cn) {
		cn = get_cnode(sb);
		if (!cn) {
			reiserfs_panic(sb, "journal-4", "get_cnode failed!");
		}

		if (th->t_blocks_logged == th->t_blocks_allocated) {
			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
		}
		th->t_blocks_logged++;
		journal->j_len++;

		cn->bh = bh;
		cn->blocknr = bh->b_blocknr;
		cn->sb = sb;
		cn->jlist = NULL;
		insert_journal_hash(journal->j_hash_table, cn);
		if (!count_already_incd) {
			get_bh(bh);
		}
		cn->prev = journal->j_last;
		cn->next = NULL;
		if (journal->j_last) {
			journal->j_last->next = cn;
			journal->j_last = cn;
		} else {
			journal->j_first = cn;
			journal->j_last = cn;
		}
		reiserfs_schedule_old_flush(sb);
	}
	return 0;
}
int journal_end(struct reiserfs_transaction_handle *th)
{
	struct super_block *sb = th->t_super;
	if (!current->journal_info && th->t_refcount > 1)
		reiserfs_warning(sb, "REISER-NESTING",
				 "th NULL, refcount %d", th->t_refcount);

	if (!th->t_trans_id) {
		WARN_ON(1);
		return -EIO;
	}

	th->t_refcount--;
	if (th->t_refcount > 0) {
		struct reiserfs_transaction_handle *cur_th =
		    current->journal_info;

		/*
		 * we aren't allowed to close a nested transaction on a
		 * different filesystem from the one in the task struct
		 */
		BUG_ON(cur_th->t_super != th->t_super);

		if (th != cur_th) {
			memcpy(current->journal_info, th, sizeof(*th));
			th->t_trans_id = 0;
		}
		return 0;
	} else {
		return do_journal_end(th, 0);
	}
}
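
/*
 * Illustrative sketch (not from the original source): the typical life cycle
 * of a transaction handle using the interfaces above.  A metadata buffer must
 * be prepared before it is modified and then logged with journal_mark_dirty()
 * before the handle is ended.  The function name is hypothetical.
 */
#if 0
static int example_log_one_buffer(struct super_block *sb,
				  struct buffer_head *bh)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);
	if (err)
		return err;

	/* keep bh from being written out while we change it */
	reiserfs_prepare_for_journal(sb, bh, 1);
	/* ... modify bh->b_data here ... */
	journal_mark_dirty(&th, bh);

	/* batchable end; use journal_end_sync() to wait for the commit */
	return journal_end(&th);
}
#endif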
/*
 * removes from the current transaction, relsing and decrementing any counters.
 * also files the removed buffer directly onto the clean list
 *
 * called by journal_mark_freed when a block has been deleted
 *
 * returns 1 if it cleaned and relsed the buffer. 0 otherwise
 */
static int remove_from_transaction(struct super_block *sb,
				   b_blocknr_t blocknr, int already_cleaned)
{
	struct buffer_head *bh;
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (!cn || !cn->bh) {
		return ret;
	}
	bh = cn->bh;
	if (cn->prev) {
		cn->prev->next = cn->next;
	}
	if (cn->next) {
		cn->next->prev = cn->prev;
	}
	if (cn == journal->j_first) {
		journal->j_first = cn->next;
	}
	if (cn == journal->j_last) {
		journal->j_last = cn->prev;
	}
	remove_journal_hash(sb, journal->j_hash_table, NULL,
			    bh->b_blocknr, 0);
	clear_buffer_journaled(bh);	/* don't log this one */

	if (!already_cleaned) {
		clear_buffer_journal_dirty(bh);
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
		put_bh(bh);
		if (atomic_read(&bh->b_count) < 0) {
			reiserfs_warning(sb, "journal-1752",
					 "b_count < 0");
		}
		ret = 1;
	}
	journal->j_len--;
	journal->j_len_alloc--;
	free_cnode(sb, cn);
	return ret;
}
/*
 * for any cnode in a journal list, it can only be dirtied if all the
 * transactions that include it are committed to disk.
 * this checks through each transaction, and returns 1 if you are allowed
 * to dirty, and 0 if you aren't
 *
 * it is called by dirty_journal_list, which is called after
 * flush_commit_list has gotten all the log blocks for a given
 * transaction on disk
 */
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;
	struct reiserfs_journal_cnode *cur = cn->hprev;
	int can_dirty = 1;

	/*
	 * first test hprev. These are all newer than cn, so any node here
	 * with the same block number and dev means this node can't be sent
	 * to disk right now.
	 */
	while (cur && can_dirty) {
		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
		    cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hprev;
	}
	/*
	 * then test hnext. These are all older than cn. As long as they
	 * are committed to the log, it is safe to write cn to disk
	 */
	cur = cn->hnext;
	while (cur && can_dirty) {
		if (cur->jlist && cur->jlist->j_len > 0 &&
		    atomic_read(&cur->jlist->j_commit_left) > 0 && cur->bh &&
		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hnext;
	}
	return can_dirty;
}
/*
 * syncs the commit blocks, but does not force the real buffers to disk
 * will wait until the current transaction is done/committed before returning
 */
int journal_end_sync(struct reiserfs_transaction_handle *th)
{
	struct super_block *sb = th->t_super;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);
	/* you can sync while nested, very, very bad */
	BUG_ON(th->t_refcount > 1);
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
					     1);
		journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
	}
	return do_journal_end(th, COMMIT_NOW | WAIT);
}
/* writeback the pending async commits to disk */
static void flush_async_commits(struct work_struct *work)
{
	struct reiserfs_journal *journal =
	    container_of(work, struct reiserfs_journal, j_work.work);
	struct super_block *sb = journal->j_work_sb;
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	reiserfs_write_lock(sb);
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(sb, jl, 1);
	}
	reiserfs_write_unlock(sb);
}
/*
 * flushes any old transactions to disk
 * ends the current transaction if it is too old
 */
void reiserfs_flush_old_commits(struct super_block *sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	now = get_seconds();
	/*
	 * safety check so we don't flush while we are replaying the log during
	 * mount
	 */
	if (list_empty(&journal->j_journal_list))
		return;

	/*
	 * check the current transaction. If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 &&
	    journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, sb)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));

			/*
			 * we're only being called from kreiserfsd, it makes
			 * no sense to do an async commit so that kreiserfsd
			 * can make progress
			 */
			do_journal_end(&th, COMMIT_NOW | WAIT);
		}
	}
}
/*
 * returns 0 if do_journal_end should return right away, returns 1 if
 * do_journal_end should finish the commit
 *
 * if the current transaction is too old, but still has writers, this will
 * wait on j_join_wait until all the writers are done. By the time it
 * wakes up, the transaction it was called with has already ended, so it just
 * flushes the commit list and returns 0.
 *
 * Won't batch when flush or commit_now is set. Also won't batch when
 * others are waiting on j_join_wait.
 *
 * Note, we can't allow the journal_end to proceed while there are still
 * writers in the log.
 */
static int check_journal_end(struct reiserfs_transaction_handle *th, int flags)
{
	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct super_block *sb = th->t_super;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	/* <= 0 is allowed. unmounting might not call begin */
	if (atomic_read(&journal->j_wcount) > 0)
		atomic_dec(&journal->j_wcount);

	/*
	 * BUG, deal with case where j_len is 0, but people previously
	 * freed blocks need to be released will be dealt with by next
	 * transaction that actually writes something, but should be taken
	 * care of in this trans
	 */
	BUG_ON(journal->j_len == 0);

	/*
	 * if wcount > 0, and we are called with flush or commit_now,
	 * we wait on j_join_wait. We will wake up when the last writer has
	 * finished the transaction, and started it on its way to the disk.
	 * Then, we flush the commit or journal list, and just return 0
	 * because the rest of journal end was already done for this
	 * transaction.
	 */
	if (atomic_read(&journal->j_wcount) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&journal->j_jlock, 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}

			/*
			 * sleep while the current transaction is
			 * still running
			 */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(sb);
				} else {
					if (journal->j_trans_id == trans_id) {
						atomic_set(&journal->j_jlock,
							   1);
					}
				}
			}
			BUG_ON(journal->j_trans_id == trans_id);

			if (commit_now
			    && journal_list_still_alive(sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(sb, jl, 1);
			}
			return 0;
		}
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&journal->j_jlock))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		journal->j_bcount++;
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
		reiserfs_panic(sb, "journal-003",
			       "j_start (%ld) is too high",
			       journal->j_start);
	}
	return 1;
}
/*
 * Does all the work that makes deleting blocks safe.
 * when deleting a block marked BH_JNew, just remove it from the current
 * transaction, clean its buffer_head and move on.
 *
 * otherwise:
 * set a bit for the block in the journal bitmap. That will prevent it from
 * being allocated for unformatted nodes before this transaction has finished.
 *
 * mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.
 * That will prevent any old transactions with this block from trying to flush
 * to the real location. Since we aren't removing the cnode from the
 * journal_list_hash, the block can't be reallocated yet.
 *
 * Then remove it from the current transaction, decrementing any counters and
 * filing it on the clean list.
 */
int journal_mark_freed(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, b_blocknr_t blocknr)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	struct buffer_head *bh = NULL;
	struct reiserfs_list_bitmap *jb = NULL;
	int cleaned = 0;

	BUG_ON(!th->t_trans_id);

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (cn && cn->bh) {
		bh = cn->bh;
		get_bh(bh);
	}
	/* if it is journal new, we just remove it from this transaction */
	if (bh && buffer_journal_new(bh)) {
		clear_buffer_journal_new(bh);
		clear_prepared_bits(bh);
		reiserfs_clean_and_file_buffer(bh);
		cleaned = remove_from_transaction(sb, blocknr, cleaned);
	} else {
		/*
		 * set the bit for this block in the journal bitmap
		 * for this transaction
		 */
		jb = journal->j_current_jl->j_list_bitmap;
		if (!jb) {
			reiserfs_panic(sb, "journal-1702",
				       "journal_list_bitmap is NULL");
		}
		set_bit_in_list_bitmap(sb, blocknr, jb);

		/* Note, the entire while loop is not allowed to schedule. */

		if (bh) {
			clear_prepared_bits(bh);
			reiserfs_clean_and_file_buffer(bh);
		}
		cleaned = remove_from_transaction(sb, blocknr, cleaned);

		/*
		 * find all older transactions with this block,
		 * make sure they don't try to write it out
		 */
		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
					  blocknr);
		while (cn) {
			if (sb == cn->sb && blocknr == cn->blocknr) {
				set_bit(BLOCK_FREED, &cn->state);
				if (cn->bh) {
					if (!cleaned) {
						/*
						 * remove_from_transaction will brelse
						 * the buffer if it was in the current
						 * trans
						 */
						clear_buffer_journal_dirty(cn->bh);
						clear_buffer_dirty(cn->bh);
						clear_buffer_journal_test(cn->bh);
						cleaned = 1;
						put_bh(cn->bh);
						if (atomic_read
						    (&cn->bh->b_count) < 0) {
							reiserfs_warning(sb,
								 "cn->bh->b_count < 0");
						}
					}
					/*
					 * since we are clearing the bh,
					 * we MUST dec nonzerolen
					 */
					if (cn->jlist) {
						atomic_dec(&cn->jlist->
							   j_nonzerolen);
					}
					cn->bh = NULL;
				}
			}
			cn = cn->hnext;
		}
	}

	if (bh)
		release_buffer_page(bh);	/* get_hash grabs the buffer */
	return 0;
}
void reiserfs_update_inode_transaction(struct inode *inode)
{
	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
	REISERFS_I(inode)->i_jl = journal->j_current_jl;
	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
			     struct reiserfs_journal_list *jl)
{
	struct reiserfs_transaction_handle th;
	struct super_block *sb = inode->i_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	/*
	 * is it from the current transaction,
	 * or from an unknown transaction?
	 */
	if (id == journal->j_trans_id) {
		jl = journal->j_current_jl;
		/*
		 * try to let other writers come in and
		 * grow this transaction
		 */
		let_transaction_grow(sb, id);
		if (journal->j_trans_id != id) {
			goto flush_commit_only;
		}

		ret = journal_begin(&th, sb, 1);
		if (ret)
			return ret;

		/* someone might have ended this transaction while we joined */
		if (journal->j_trans_id != id) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));
			ret = journal_end(&th);
			goto flush_commit_only;
		}

		ret = journal_end_sync(&th);
		if (!ret)
			ret = 1;

	} else {
		/*
		 * this gets tricky, we have to make sure the journal list in
		 * the inode still exists. We know the list is still around
		 * if we've got a larger transaction id than the oldest list
		 */
flush_commit_only:
		if (journal_list_still_alive(inode->i_sb, id)) {
			/*
			 * we only set ret to 1 when we know for sure
			 * the barrier hasn't been started yet on the commit
			 * block.
			 */
			if (atomic_read(&jl->j_commit_left) > 1)
				ret = 1;
			flush_commit_list(sb, jl, 1);
			if (journal->j_errno)
				ret = journal->j_errno;
		}
	}
	/* otherwise the list is gone, and long since committed */
	return ret;
}
int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned int id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/*
	 * for the whole inode, assume unset id means it was
	 * changed in the current transaction. More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}
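
/*
 * Illustrative sketch (not from the original source): how an fsync-style path
 * can use reiserfs_commit_for_inode() to make sure the transaction that last
 * touched an inode has its commit block on disk.  The function name is
 * hypothetical.
 */
#if 0
static int example_sync_inode_metadata(struct inode *inode)
{
	int ret = reiserfs_commit_for_inode(inode);

	/* a positive return means a commit was actually performed */
	return ret < 0 ? ret : 0;
}
#endif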
void reiserfs_restore_prepared_buffer(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	PROC_INFO_INC(sb, journal.restore_prepared);
	if (!bh)
		return;
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		reiserfs_write_lock(sb);
		cn = get_journal_hash_dev(sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
		reiserfs_write_unlock(sb);
	}
	clear_buffer_journal_prepared(bh);
}
extern struct tree_balance *cur_tb;
/*
 * before we can change a metadata block, we have to make sure it won't
 * be written to disk while we are altering it. So, we must:
 * clean it
 * wait on it.
 */
int reiserfs_prepare_for_journal(struct super_block *sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(sb, journal.prepare);

	if (!trylock_buffer(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}
/*
 * long and ugly. If flush, will not return until all commit
 * blocks and all real buffers in the trans are on disk.
 * If no_async, won't return until all commit blocks are on disk.
 *
 * keep reading, there are comments as you go along
 *
 * If the journal is aborted, we just clean up. Things like flushing
 * journal lists, etc just won't happen.
 */
static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
{
	struct super_block *sb = th->t_super;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int i;
	int flush;
	int wait_on_commit;
	int trans_half;
	int depth;
	unsigned long old_start;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned int commit_trans_id;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);
	BUG_ON(!th->t_super);

	/*
	 * protect flush_older_commits from doing mistakes if the
	 * transaction ID counter gets overflowed.
	 */
	if (th->t_trans_id == ~0U)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
					     1);
		journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
	}

	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/*
	 * check_journal_end locks the journal, and unlocks if it does
	 * not return 1 it tells us if we should continue with the
	 * journal_end, or just return
	 */
	if (!check_journal_end(th, flags)) {
		reiserfs_schedule_old_flush(sb);
		wake_queued_writers(sb);
		reiserfs_async_progress_wait(sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}

	/*
	 * j must wait means we have to flush the log blocks, and the
	 * real blocks for this transaction
	 */
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/*
	 * quota ops might need to nest, setup the journal_info pointer
	 * for them and raise the refcount so that it is > 0.
	 */
	current->journal_info = th;
	th->t_refcount++;

	/* it should not involve new blocks into the transaction */
	reiserfs_discard_all_prealloc(th);

	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif

	/* setup description block */
	d_bh =
	    journal_getblk(sb,
			   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			   journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/*
	 * setup commit block. Don't write (keep it clean too) this one
	 * until after everyone else is written
	 */
	c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/*
	 * we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;

	/*
	 * The ENTIRE FOR LOOP MUST not cause schedule to occur.
	 * for each real block, add it to the journal list hash,
	 * copy into real block index array in the commit or desc block
	 */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(sb);
			if (!jl_cn) {
				reiserfs_panic(sb, "journal-1676",
					       "get_cnode returned NULL");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/*
			 * make sure the block we are trying to log
			 * is not a block of journal or reserved area
			 */
			if (is_block_in_log_or_reserved_area
			    (sb, cn->bh->b_blocknr)) {
				reiserfs_panic(sb, "journal-2332",
					       "Trying to log block %lu, "
					       "which is a log block",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/*
	 * special check in case all buffers in the journal
	 * were marked for not logging
	 */
	BUG_ON(journal->j_len == 0);

	/*
	 * we're about to dirty all the log blocks, mark the description block
	 * dirty now too. Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/*
	 * first data block is j_start + 1, so add one to
	 * cur_write_start wherever you use it
	 */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;	/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area. dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh =
			    journal_getblk(sb,
					   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
					   ((cur_write_start +
					     jindex) %
					    SB_ONDISK_JOURNAL_SIZE(sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/*
			 * JDirty cleared sometime during transaction.
			 * don't log this one
			 */
			reiserfs_warning(sb, "journal-2048",
					 "BAD, buffer in journal hash, "
					 "but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(sb, cn);
		cn = next;
		reiserfs_cond_resched(sb);
	}

	/*
	 * we are done with both the c_bh and d_bh, but
	 * c_bh must be written after all other commit blocks,
	 * so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
	 */

	journal->j_current_jl = alloc_journal_list(sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(sb);
	atomic_set(&journal->j_wcount, 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(sb);

	/*
	 * make sure reiserfs_add_jh sees the new current_jl before we
	 * write out the tails
	 */
	smp_mb();

	/*
	 * tail conversion targets have to hit the disk before we end the
	 * transaction. Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed
	 * and clean, if we crash before the later transaction commits, the
	 * data block is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		depth = reiserfs_write_unlock_nested(sb);
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		reiserfs_write_lock_nested(sb, depth);
	}
	BUG_ON(!list_empty(&jl->j_tail_bh_list));
	mutex_unlock(&jl->j_commit_mutex);

	/*
	 * honor the flush wishes from the caller, simple commits can
	 * be done outside the journal lock, they are done below
	 *
	 * if we don't flush the commit list right now, we put it into
	 * the work queue so the people waiting on the async progress work
	 * queue don't wait for this proc to flush journal lists and such.
	 */
	if (flush) {
		flush_commit_list(sb, jl, 1);
		flush_journal_list(sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
		/*
		 * Avoid queueing work when sb is being shut down. Transaction
		 * will be flushed on journal shutdown.
		 */
		if (sb->s_flags & MS_ACTIVE)
			queue_delayed_work(REISERFS_SB(sb)->commit_wq,
					   &journal->j_work, HZ / 10);
	}

	/*
	 * if the next transaction has any chance of wrapping, flush
	 * transactions that might get overwritten. If any journal lists
	 * are very old flush them as well.
	 */
first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(sb)) {
				/*
				 * if we don't cross into the next
				 * transaction and we don't wrap, there is
				 * no way we can overlap any later transactions
				 * break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else {
				/*
				 * we don't overlap anything from our start
				 * to the end of the log, and our wrapped
				 * portion doesn't overlap anything at
				 * the start of the log. We can break
				 */
				break;
			}
		}
	}

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(sb, "journal-1996",
			       "could not get a list bitmap");
	}

	atomic_set(&journal->j_jlock, 0);
	/* wake up anybody waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&journal->j_join_wait);

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(sb, commit_trans_id)) {
		flush_commit_list(sb, jl, 1);
	}
out:
	reiserfs_check_lock_depth(sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/*
	 * Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later
	 */
	th->t_super = sb;

	return journal->j_errno;
}
/* Set the file system read only and refuse new transactions */
void reiserfs_abort_journal(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK