** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....  The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join -- same as journal_begin, but won't block on the current
**                 transaction regardless of age.  Don't ever call
**                 this.  Ever.  There are only two places it should be
**                 called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  Clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing.
**                Otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag.
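**
** For orientation only, here is a minimal sketch of how callers typically
** drive this API (th, sb and bh are placeholders and the block counts are
** arbitrary; this is not code taken from this file):
**
**   struct reiserfs_transaction_handle th;
**   journal_begin(&th, sb, 10);
**   reiserfs_prepare_for_journal(sb, bh, 1);
**   journal_mark_dirty(&th, sb, bh);
**   journal_end(&th, sb, 10);
**
** do_journal_release() below uses the same begin/mark_dirty/end sequence on
** the superblock buffer to force a final flush at unmount.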
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/time.h>
#include <asm/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))
/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue */
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;
#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
                                   structs at 4k */
#define BUFNR 64                /* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2           /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING 4   /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1           /* flush commit and real blocks */
#define COMMIT_NOW  2           /* end and commit this transaction */
#define WAIT        4           /* wait for the log blocks to hit the disk */
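/* Illustrative use of these flags (the first call is the unmount path further
** down in this file; the second combination is an assumption about how a
** synchronous commit would be requested, not something shown here):
**
**   do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);             // flush log + real blocks
**   do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);  // commit and wait
*/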
static int do_journal_end(struct reiserfs_transaction_handle *,
                          struct super_block *, unsigned long nblocks,
                          int flags);
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
enum {
        JBEGIN_REG = 0,         /* regular journal begin */
        JBEGIN_JOIN = 1,        /* join the running transaction if at all possible */
        JBEGIN_ABORT = 2,       /* called from cleanup code, ignores aborted flag */
};
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb,
                              unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *p_s_sb)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        memset(journal->j_hash_table, 0,
               JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
        if (bh) {
                clear_buffer_dirty(bh);
                clear_buffer_journal_test(bh);
        }
        return 0;
}
static void disable_barrier(struct super_block *s)
{
        REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
        printk("reiserfs: disabling flush barriers on %s\n",
               reiserfs_bdevname(s));
}
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
                                                         *p_s_sb)
{
        struct reiserfs_bitmap_node *bn;

        bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
        if (!bn)
                return NULL;
        bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
        if (!bn->data) {
                kfree(bn);
                return NULL;
        }
        INIT_LIST_HEAD(&bn->list);
        return bn;
}
169 static struct reiserfs_bitmap_node
*get_bitmap_node(struct super_block
*p_s_sb
)
171 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
172 struct reiserfs_bitmap_node
*bn
= NULL
;
173 struct list_head
*entry
= journal
->j_bitmap_nodes
.next
;
175 journal
->j_used_bitmap_nodes
++;
178 if (entry
!= &journal
->j_bitmap_nodes
) {
179 bn
= list_entry(entry
, struct reiserfs_bitmap_node
, list
);
181 memset(bn
->data
, 0, p_s_sb
->s_blocksize
);
182 journal
->j_free_bitmap_nodes
--;
185 bn
= allocate_bitmap_node(p_s_sb
);
192 static inline void free_bitmap_node(struct super_block
*p_s_sb
,
193 struct reiserfs_bitmap_node
*bn
)
195 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
196 journal
->j_used_bitmap_nodes
--;
197 if (journal
->j_free_bitmap_nodes
> REISERFS_MAX_BITMAP_NODES
) {
201 list_add(&bn
->list
, &journal
->j_bitmap_nodes
);
202 journal
->j_free_bitmap_nodes
++;
206 static void allocate_bitmap_nodes(struct super_block
*p_s_sb
)
209 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
210 struct reiserfs_bitmap_node
*bn
= NULL
;
211 for (i
= 0; i
< REISERFS_MIN_BITMAP_NODES
; i
++) {
212 bn
= allocate_bitmap_node(p_s_sb
);
214 list_add(&bn
->list
, &journal
->j_bitmap_nodes
);
215 journal
->j_free_bitmap_nodes
++;
217 break; // this is ok, we'll try again when more are needed
222 static int set_bit_in_list_bitmap(struct super_block
*p_s_sb
, int block
,
223 struct reiserfs_list_bitmap
*jb
)
225 int bmap_nr
= block
/ (p_s_sb
->s_blocksize
<< 3);
226 int bit_nr
= block
% (p_s_sb
->s_blocksize
<< 3);
228 if (!jb
->bitmaps
[bmap_nr
]) {
229 jb
->bitmaps
[bmap_nr
] = get_bitmap_node(p_s_sb
);
231 set_bit(bit_nr
, (unsigned long *)jb
->bitmaps
[bmap_nr
]->data
);
235 static void cleanup_bitmap_list(struct super_block
*p_s_sb
,
236 struct reiserfs_list_bitmap
*jb
)
239 if (jb
->bitmaps
== NULL
)
242 for (i
= 0; i
< SB_BMAP_NR(p_s_sb
); i
++) {
243 if (jb
->bitmaps
[i
]) {
244 free_bitmap_node(p_s_sb
, jb
->bitmaps
[i
]);
245 jb
->bitmaps
[i
] = NULL
;
251 ** only call this on FS unmount.
253 static int free_list_bitmaps(struct super_block
*p_s_sb
,
254 struct reiserfs_list_bitmap
*jb_array
)
257 struct reiserfs_list_bitmap
*jb
;
258 for (i
= 0; i
< JOURNAL_NUM_BITMAPS
; i
++) {
260 jb
->journal_list
= NULL
;
261 cleanup_bitmap_list(p_s_sb
, jb
);
268 static int free_bitmap_nodes(struct super_block
*p_s_sb
)
270 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
271 struct list_head
*next
= journal
->j_bitmap_nodes
.next
;
272 struct reiserfs_bitmap_node
*bn
;
274 while (next
!= &journal
->j_bitmap_nodes
) {
275 bn
= list_entry(next
, struct reiserfs_bitmap_node
, list
);
279 next
= journal
->j_bitmap_nodes
.next
;
280 journal
->j_free_bitmap_nodes
--;
287 ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
288 ** jb_array is the array to be filled in.
290 int reiserfs_allocate_list_bitmaps(struct super_block
*p_s_sb
,
291 struct reiserfs_list_bitmap
*jb_array
,
296 struct reiserfs_list_bitmap
*jb
;
297 int mem
= bmap_nr
* sizeof(struct reiserfs_bitmap_node
*);
299 for (i
= 0; i
< JOURNAL_NUM_BITMAPS
; i
++) {
301 jb
->journal_list
= NULL
;
302 jb
->bitmaps
= vmalloc(mem
);
304 reiserfs_warning(p_s_sb
,
305 "clm-2000, unable to allocate bitmaps for journal lists");
309 memset(jb
->bitmaps
, 0, mem
);
312 free_list_bitmaps(p_s_sb
, jb_array
);
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again.
322 static struct reiserfs_list_bitmap
*get_list_bitmap(struct super_block
*p_s_sb
,
323 struct reiserfs_journal_list
327 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
328 struct reiserfs_list_bitmap
*jb
= NULL
;
330 for (j
= 0; j
< (JOURNAL_NUM_BITMAPS
* 3); j
++) {
331 i
= journal
->j_list_bitmap_index
;
332 journal
->j_list_bitmap_index
= (i
+ 1) % JOURNAL_NUM_BITMAPS
;
333 jb
= journal
->j_list_bitmap
+ i
;
334 if (journal
->j_list_bitmap
[i
].journal_list
) {
335 flush_commit_list(p_s_sb
,
336 journal
->j_list_bitmap
[i
].
338 if (!journal
->j_list_bitmap
[i
].journal_list
) {
345 if (jb
->journal_list
) { /* double check to make sure if flushed correctly */
348 jb
->journal_list
= jl
;
353 ** allocates a new chunk of X nodes, and links them all together as a list.
354 ** Uses the cnode->next and cnode->prev pointers
355 ** returns NULL on failure
357 static struct reiserfs_journal_cnode
*allocate_cnodes(int num_cnodes
)
359 struct reiserfs_journal_cnode
*head
;
361 if (num_cnodes
<= 0) {
364 head
= vmalloc(num_cnodes
* sizeof(struct reiserfs_journal_cnode
));
368 memset(head
, 0, num_cnodes
* sizeof(struct reiserfs_journal_cnode
));
370 head
[0].next
= head
+ 1;
371 for (i
= 1; i
< num_cnodes
; i
++) {
372 head
[i
].prev
= head
+ (i
- 1);
373 head
[i
].next
= head
+ (i
+ 1); /* if last one, overwrite it after the if */
375 head
[num_cnodes
- 1].next
= NULL
;
380 ** pulls a cnode off the free list, or returns NULL on failure
382 static struct reiserfs_journal_cnode
*get_cnode(struct super_block
*p_s_sb
)
384 struct reiserfs_journal_cnode
*cn
;
385 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
387 reiserfs_check_lock_depth(p_s_sb
, "get_cnode");
389 if (journal
->j_cnode_free
<= 0) {
392 journal
->j_cnode_used
++;
393 journal
->j_cnode_free
--;
394 cn
= journal
->j_cnode_free_list
;
399 cn
->next
->prev
= NULL
;
401 journal
->j_cnode_free_list
= cn
->next
;
402 memset(cn
, 0, sizeof(struct reiserfs_journal_cnode
));
407 ** returns a cnode to the free list
409 static void free_cnode(struct super_block
*p_s_sb
,
410 struct reiserfs_journal_cnode
*cn
)
412 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
414 reiserfs_check_lock_depth(p_s_sb
, "free_cnode");
416 journal
->j_cnode_used
--;
417 journal
->j_cnode_free
++;
418 /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
419 cn
->next
= journal
->j_cnode_free_list
;
420 if (journal
->j_cnode_free_list
) {
421 journal
->j_cnode_free_list
->prev
= cn
;
423 cn
->prev
= NULL
; /* not needed with the memset, but I might kill the memset, and forget to do this */
424 journal
->j_cnode_free_list
= cn
;
427 static void clear_prepared_bits(struct buffer_head
*bh
)
429 clear_buffer_journal_prepared(bh
);
430 clear_buffer_journal_restore_dirty(bh
);
433 /* utility function to force a BUG if it is called without the big
434 ** kernel lock held. caller is the string printed just before calling BUG()
436 void reiserfs_check_lock_depth(struct super_block
*sb
, char *caller
)
439 if (current
->lock_depth
< 0) {
440 reiserfs_panic(sb
, "%s called without kernel lock held",
448 /* return a cnode with same dev, block number and size in table, or null if not found */
449 static inline struct reiserfs_journal_cnode
*get_journal_hash_dev(struct
453 reiserfs_journal_cnode
457 struct reiserfs_journal_cnode
*cn
;
458 cn
= journal_hash(table
, sb
, bl
);
460 if (cn
->blocknr
== bl
&& cn
->sb
== sb
)
464 return (struct reiserfs_journal_cnode
*)0;
468 ** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
469 ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
470 ** being overwritten by a replay after crashing.
472 ** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
473 ** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
474 ** sure you never write the block without logging it.
476 ** next_zero_bit is a suggestion about the next block to try for find_forward.
477 ** when bl is rejected because it is set in a journal list bitmap, we search
478 ** for the next zero bit in the bitmap that rejected bl. Then, we return that
479 ** through next_zero_bit for find_forward to try.
481 ** Just because we return something in next_zero_bit does not mean we won't
482 ** reject it on the next call to reiserfs_in_journal
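/* Illustrative caller sketch (placeholders, not code from this file): the
** block allocator asks whether a candidate bit may be reused and, when the
** answer is "still pinned by the log", continues scanning from the hint:
**
**   b_blocknr_t next;
**   if (reiserfs_in_journal(sb, bmap_nr, bit_nr, 1, &next)) {
**           bit_nr = next;   // block still referenced by a journal list
**           continue;
**   }
*/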
485 int reiserfs_in_journal(struct super_block
*p_s_sb
,
486 int bmap_nr
, int bit_nr
, int search_all
,
487 b_blocknr_t
* next_zero_bit
)
489 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
490 struct reiserfs_journal_cnode
*cn
;
491 struct reiserfs_list_bitmap
*jb
;
495 *next_zero_bit
= 0; /* always start this at zero. */
497 PROC_INFO_INC(p_s_sb
, journal
.in_journal
);
498 /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
499 ** if we crash before the transaction that freed it commits, this transaction won't
500 ** have committed either, and the block will never be written
503 for (i
= 0; i
< JOURNAL_NUM_BITMAPS
; i
++) {
504 PROC_INFO_INC(p_s_sb
, journal
.in_journal_bitmap
);
505 jb
= journal
->j_list_bitmap
+ i
;
506 if (jb
->journal_list
&& jb
->bitmaps
[bmap_nr
] &&
508 (unsigned long *)jb
->bitmaps
[bmap_nr
]->
511 find_next_zero_bit((unsigned long *)
512 (jb
->bitmaps
[bmap_nr
]->
514 p_s_sb
->s_blocksize
<< 3,
521 bl
= bmap_nr
* (p_s_sb
->s_blocksize
<< 3) + bit_nr
;
522 /* is it in any old transactions? */
525 get_journal_hash_dev(p_s_sb
, journal
->j_list_hash_table
, bl
))) {
529 /* is it in the current transaction. This should never happen */
530 if ((cn
= get_journal_hash_dev(p_s_sb
, journal
->j_hash_table
, bl
))) {
535 PROC_INFO_INC(p_s_sb
, journal
.in_journal_reusable
);
/* insert cn into table */
542 static inline void insert_journal_hash(struct reiserfs_journal_cnode
**table
,
543 struct reiserfs_journal_cnode
*cn
)
545 struct reiserfs_journal_cnode
*cn_orig
;
547 cn_orig
= journal_hash(table
, cn
->sb
, cn
->blocknr
);
553 journal_hash(table
, cn
->sb
, cn
->blocknr
) = cn
;
556 /* lock the current transaction */
557 static inline void lock_journal(struct super_block
*p_s_sb
)
559 PROC_INFO_INC(p_s_sb
, journal
.lock_journal
);
560 down(&SB_JOURNAL(p_s_sb
)->j_lock
);
563 /* unlock the current transaction */
564 static inline void unlock_journal(struct super_block
*p_s_sb
)
566 up(&SB_JOURNAL(p_s_sb
)->j_lock
);
569 static inline void get_journal_list(struct reiserfs_journal_list
*jl
)
574 static inline void put_journal_list(struct super_block
*s
,
575 struct reiserfs_journal_list
*jl
)
577 if (jl
->j_refcount
< 1) {
578 reiserfs_panic(s
, "trans id %lu, refcount at %d",
579 jl
->j_trans_id
, jl
->j_refcount
);
581 if (--jl
->j_refcount
== 0)
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
590 static void cleanup_freed_for_journal_list(struct super_block
*p_s_sb
,
591 struct reiserfs_journal_list
*jl
)
594 struct reiserfs_list_bitmap
*jb
= jl
->j_list_bitmap
;
596 cleanup_bitmap_list(p_s_sb
, jb
);
598 jl
->j_list_bitmap
->journal_list
= NULL
;
599 jl
->j_list_bitmap
= NULL
;
602 static int journal_list_still_alive(struct super_block
*s
,
603 unsigned long trans_id
)
605 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
606 struct list_head
*entry
= &journal
->j_journal_list
;
607 struct reiserfs_journal_list
*jl
;
609 if (!list_empty(entry
)) {
610 jl
= JOURNAL_LIST_ENTRY(entry
->next
);
611 if (jl
->j_trans_id
<= trans_id
) {
618 static void reiserfs_end_buffer_io_sync(struct buffer_head
*bh
, int uptodate
)
620 char b
[BDEVNAME_SIZE
];
622 if (buffer_journaled(bh
)) {
623 reiserfs_warning(NULL
,
624 "clm-2084: pinned buffer %lu:%s sent to disk",
625 bh
->b_blocknr
, bdevname(bh
->b_bdev
, b
));
628 set_buffer_uptodate(bh
);
630 clear_buffer_uptodate(bh
);
635 static void reiserfs_end_ordered_io(struct buffer_head
*bh
, int uptodate
)
638 set_buffer_uptodate(bh
);
640 clear_buffer_uptodate(bh
);
645 static void submit_logged_buffer(struct buffer_head
*bh
)
648 bh
->b_end_io
= reiserfs_end_buffer_io_sync
;
649 clear_buffer_journal_new(bh
);
650 clear_buffer_dirty(bh
);
651 if (!test_clear_buffer_journal_test(bh
))
653 if (!buffer_uptodate(bh
))
655 submit_bh(WRITE
, bh
);
658 static void submit_ordered_buffer(struct buffer_head
*bh
)
661 bh
->b_end_io
= reiserfs_end_ordered_io
;
662 clear_buffer_dirty(bh
);
663 if (!buffer_uptodate(bh
))
665 submit_bh(WRITE
, bh
);
668 static int submit_barrier_buffer(struct buffer_head
*bh
)
671 bh
->b_end_io
= reiserfs_end_ordered_io
;
672 clear_buffer_dirty(bh
);
673 if (!buffer_uptodate(bh
))
675 return submit_bh(WRITE_BARRIER
, bh
);
678 static void check_barrier_completion(struct super_block
*s
,
679 struct buffer_head
*bh
)
681 if (buffer_eopnotsupp(bh
)) {
682 clear_buffer_eopnotsupp(bh
);
684 set_buffer_uptodate(bh
);
685 set_buffer_dirty(bh
);
686 sync_dirty_buffer(bh
);
690 #define CHUNK_SIZE 32
691 struct buffer_chunk
{
692 struct buffer_head
*bh
[CHUNK_SIZE
];
696 static void write_chunk(struct buffer_chunk
*chunk
)
700 for (i
= 0; i
< chunk
->nr
; i
++) {
701 submit_logged_buffer(chunk
->bh
[i
]);
707 static void write_ordered_chunk(struct buffer_chunk
*chunk
)
711 for (i
= 0; i
< chunk
->nr
; i
++) {
712 submit_ordered_buffer(chunk
->bh
[i
]);
718 static int add_to_chunk(struct buffer_chunk
*chunk
, struct buffer_head
*bh
,
719 spinlock_t
* lock
, void (fn
) (struct buffer_chunk
*))
722 if (chunk
->nr
>= CHUNK_SIZE
)
724 chunk
->bh
[chunk
->nr
++] = bh
;
725 if (chunk
->nr
>= CHUNK_SIZE
) {
736 static atomic_t nr_reiserfs_jh
= ATOMIC_INIT(0);
737 static struct reiserfs_jh
*alloc_jh(void)
739 struct reiserfs_jh
*jh
;
741 jh
= kmalloc(sizeof(*jh
), GFP_NOFS
);
743 atomic_inc(&nr_reiserfs_jh
);
751 * we want to free the jh when the buffer has been written
754 void reiserfs_free_jh(struct buffer_head
*bh
)
756 struct reiserfs_jh
*jh
;
760 bh
->b_private
= NULL
;
762 list_del_init(&jh
->list
);
764 if (atomic_read(&nr_reiserfs_jh
) <= 0)
766 atomic_dec(&nr_reiserfs_jh
);
771 static inline int __add_jh(struct reiserfs_journal
*j
, struct buffer_head
*bh
,
774 struct reiserfs_jh
*jh
;
777 spin_lock(&j
->j_dirty_buffers_lock
);
778 if (!bh
->b_private
) {
779 spin_unlock(&j
->j_dirty_buffers_lock
);
783 list_del_init(&jh
->list
);
788 spin_lock(&j
->j_dirty_buffers_lock
);
789 /* buffer must be locked for __add_jh, should be able to have
790 * two adds at the same time
797 jh
->jl
= j
->j_current_jl
;
799 list_add_tail(&jh
->list
, &jh
->jl
->j_tail_bh_list
);
801 list_add_tail(&jh
->list
, &jh
->jl
->j_bh_list
);
803 spin_unlock(&j
->j_dirty_buffers_lock
);
807 int reiserfs_add_tail_list(struct inode
*inode
, struct buffer_head
*bh
)
809 return __add_jh(SB_JOURNAL(inode
->i_sb
), bh
, 1);
811 int reiserfs_add_ordered_list(struct inode
*inode
, struct buffer_head
*bh
)
813 return __add_jh(SB_JOURNAL(inode
->i_sb
), bh
, 0);
816 #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
817 static int write_ordered_buffers(spinlock_t
* lock
,
818 struct reiserfs_journal
*j
,
819 struct reiserfs_journal_list
*jl
,
820 struct list_head
*list
)
822 struct buffer_head
*bh
;
823 struct reiserfs_jh
*jh
;
824 int ret
= j
->j_errno
;
825 struct buffer_chunk chunk
;
826 struct list_head tmp
;
827 INIT_LIST_HEAD(&tmp
);
831 while (!list_empty(list
)) {
832 jh
= JH_ENTRY(list
->next
);
835 if (test_set_buffer_locked(bh
)) {
836 if (!buffer_dirty(bh
)) {
837 list_del_init(&jh
->list
);
838 list_add(&jh
->list
, &tmp
);
843 write_ordered_chunk(&chunk
);
849 /* in theory, dirty non-uptodate buffers should never get here,
850 * but the upper layer io error paths still have a few quirks.
851 * Handle them here as gracefully as we can
853 if (!buffer_uptodate(bh
) && buffer_dirty(bh
)) {
854 clear_buffer_dirty(bh
);
857 if (buffer_dirty(bh
)) {
858 list_del_init(&jh
->list
);
859 list_add(&jh
->list
, &tmp
);
860 add_to_chunk(&chunk
, bh
, lock
, write_ordered_chunk
);
862 reiserfs_free_jh(bh
);
867 cond_resched_lock(lock
);
871 write_ordered_chunk(&chunk
);
874 while (!list_empty(&tmp
)) {
875 jh
= JH_ENTRY(tmp
.prev
);
878 reiserfs_free_jh(bh
);
880 if (buffer_locked(bh
)) {
885 if (!buffer_uptodate(bh
)) {
888 /* ugly interaction with invalidatepage here.
889 * reiserfs_invalidate_page will pin any buffer that has a valid
890 * journal head from an older transaction. If someone else sets
891 * our buffer dirty after we write it in the first loop, and
892 * then someone truncates the page away, nobody will ever write
893 * the buffer. We're safe if we write the page one last time
894 * after freeing the journal header.
896 if (buffer_dirty(bh
) && unlikely(bh
->b_page
->mapping
== NULL
)) {
898 ll_rw_block(WRITE
, 1, &bh
);
902 cond_resched_lock(lock
);
908 static int flush_older_commits(struct super_block
*s
,
909 struct reiserfs_journal_list
*jl
)
911 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
912 struct reiserfs_journal_list
*other_jl
;
913 struct reiserfs_journal_list
*first_jl
;
914 struct list_head
*entry
;
915 unsigned long trans_id
= jl
->j_trans_id
;
916 unsigned long other_trans_id
;
917 unsigned long first_trans_id
;
 * first we walk backwards to find the oldest uncommitted transaction
924 entry
= jl
->j_list
.prev
;
926 other_jl
= JOURNAL_LIST_ENTRY(entry
);
927 if (entry
== &journal
->j_journal_list
||
928 atomic_read(&other_jl
->j_older_commits_done
))
932 entry
= other_jl
->j_list
.prev
;
935 /* if we didn't find any older uncommitted transactions, return now */
936 if (first_jl
== jl
) {
940 first_trans_id
= first_jl
->j_trans_id
;
942 entry
= &first_jl
->j_list
;
944 other_jl
= JOURNAL_LIST_ENTRY(entry
);
945 other_trans_id
= other_jl
->j_trans_id
;
947 if (other_trans_id
< trans_id
) {
948 if (atomic_read(&other_jl
->j_commit_left
) != 0) {
949 flush_commit_list(s
, other_jl
, 0);
951 /* list we were called with is gone, return */
952 if (!journal_list_still_alive(s
, trans_id
))
955 /* the one we just flushed is gone, this means all
956 * older lists are also gone, so first_jl is no longer
957 * valid either. Go back to the beginning.
959 if (!journal_list_still_alive
960 (s
, other_trans_id
)) {
965 if (entry
== &journal
->j_journal_list
)
973 int reiserfs_async_progress_wait(struct super_block
*s
)
976 struct reiserfs_journal
*j
= SB_JOURNAL(s
);
977 if (atomic_read(&j
->j_async_throttle
))
978 blk_congestion_wait(WRITE
, HZ
/ 10);
983 ** if this journal list still has commit blocks unflushed, send them to disk.
985 ** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
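/* In outline, the ordering enforced by the function below is: flush older
** commits first, write out the ordered data buffers, submit every log block
** of this list, wait for them all, and only then write the commit block
** (through a barrier write when barriers are enabled, falling back to a
** plain synchronous write if the device returns -EOPNOTSUPP). */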
989 static int flush_commit_list(struct super_block
*s
,
990 struct reiserfs_journal_list
*jl
, int flushall
)
994 struct buffer_head
*tbh
= NULL
;
995 unsigned long trans_id
= jl
->j_trans_id
;
996 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1001 reiserfs_check_lock_depth(s
, "flush_commit_list");
1003 if (atomic_read(&jl
->j_older_commits_done
)) {
1009 /* before we can put our commit blocks on disk, we have to make sure everyone older than
1010 ** us is on disk too
1012 BUG_ON(jl
->j_len
<= 0);
1013 BUG_ON(trans_id
== journal
->j_trans_id
);
1015 get_journal_list(jl
);
1017 if (flush_older_commits(s
, jl
) == 1) {
1018 /* list disappeared during flush_older_commits. return */
1023 /* make sure nobody is trying to flush this one at the same time */
1024 down(&jl
->j_commit_lock
);
1025 if (!journal_list_still_alive(s
, trans_id
)) {
1026 up(&jl
->j_commit_lock
);
1029 BUG_ON(jl
->j_trans_id
== 0);
1031 /* this commit is done, exit */
1032 if (atomic_read(&(jl
->j_commit_left
)) <= 0) {
1034 atomic_set(&(jl
->j_older_commits_done
), 1);
1036 up(&jl
->j_commit_lock
);
1040 if (!list_empty(&jl
->j_bh_list
)) {
1043 ret
= write_ordered_buffers(&journal
->j_dirty_buffers_lock
,
1044 journal
, jl
, &jl
->j_bh_list
);
1045 if (ret
< 0 && retval
== 0)
1049 BUG_ON(!list_empty(&jl
->j_bh_list
));
1051 * for the description block and all the log blocks, submit any buffers
1052 * that haven't already reached the disk. Try to write at least 256
1053 * log blocks. later on, we will only wait on blocks that correspond
1054 * to this transaction, but while we're unplugging we might as well
1055 * get a chunk of data on there.
1057 atomic_inc(&journal
->j_async_throttle
);
1058 write_len
= jl
->j_len
+ 1;
1059 if (write_len
< 256)
1061 for (i
= 0 ; i
< write_len
; i
++) {
1062 bn
= SB_ONDISK_JOURNAL_1st_BLOCK(s
) + (jl
->j_start
+ i
) %
1063 SB_ONDISK_JOURNAL_SIZE(s
);
1064 tbh
= journal_find_get_block(s
, bn
);
1066 if (buffer_dirty(tbh
))
1067 ll_rw_block(WRITE
, 1, &tbh
) ;
1071 atomic_dec(&journal
->j_async_throttle
);
1073 /* We're skipping the commit if there's an error */
1074 if (retval
|| reiserfs_is_journal_aborted(journal
))
1077 /* wait on everything written so far before writing the commit
1078 * if we are in barrier mode, send the commit down now
1080 barrier
= reiserfs_barrier_flush(s
);
1083 lock_buffer(jl
->j_commit_bh
);
1084 ret
= submit_barrier_buffer(jl
->j_commit_bh
);
1085 if (ret
== -EOPNOTSUPP
) {
1086 set_buffer_uptodate(jl
->j_commit_bh
);
1091 for (i
= 0; i
< (jl
->j_len
+ 1); i
++) {
1092 bn
= SB_ONDISK_JOURNAL_1st_BLOCK(s
) +
1093 (jl
->j_start
+ i
) % SB_ONDISK_JOURNAL_SIZE(s
);
1094 tbh
= journal_find_get_block(s
, bn
);
1095 wait_on_buffer(tbh
);
1096 // since we're using ll_rw_blk above, it might have skipped over
1097 // a locked buffer. Double check here
1099 if (buffer_dirty(tbh
)) /* redundant, sync_dirty_buffer() checks */
1100 sync_dirty_buffer(tbh
);
1101 if (unlikely(!buffer_uptodate(tbh
))) {
1102 #ifdef CONFIG_REISERFS_CHECK
1103 reiserfs_warning(s
, "journal-601, buffer write failed");
1107 put_bh(tbh
); /* once for journal_find_get_block */
1108 put_bh(tbh
); /* once due to original getblk in do_journal_end */
1109 atomic_dec(&(jl
->j_commit_left
));
1112 BUG_ON(atomic_read(&(jl
->j_commit_left
)) != 1);
1115 /* If there was a write error in the journal - we can't commit
1116 * this transaction - it will be invalid and, if successful,
 * will just end up propagating the write error out to
1118 * the file system. */
1119 if (likely(!retval
&& !reiserfs_is_journal_aborted (journal
))) {
1120 if (buffer_dirty(jl
->j_commit_bh
))
1122 mark_buffer_dirty(jl
->j_commit_bh
) ;
1123 sync_dirty_buffer(jl
->j_commit_bh
) ;
1126 wait_on_buffer(jl
->j_commit_bh
);
1128 check_barrier_completion(s
, jl
->j_commit_bh
);
1130 /* If there was a write error in the journal - we can't commit this
1131 * transaction - it will be invalid and, if successful, will just end
 * up propagating the write error out to the filesystem. */
1133 if (unlikely(!buffer_uptodate(jl
->j_commit_bh
))) {
1134 #ifdef CONFIG_REISERFS_CHECK
1135 reiserfs_warning(s
, "journal-615: buffer write failed");
1139 bforget(jl
->j_commit_bh
);
1140 if (journal
->j_last_commit_id
!= 0 &&
1141 (jl
->j_trans_id
- journal
->j_last_commit_id
) != 1) {
1142 reiserfs_warning(s
, "clm-2200: last commit %lu, current %lu",
1143 journal
->j_last_commit_id
, jl
->j_trans_id
);
1145 journal
->j_last_commit_id
= jl
->j_trans_id
;
1147 /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
1148 cleanup_freed_for_journal_list(s
, jl
);
1150 retval
= retval
? retval
: journal
->j_errno
;
1152 /* mark the metadata dirty */
1154 dirty_one_transaction(s
, jl
);
1155 atomic_dec(&(jl
->j_commit_left
));
1158 atomic_set(&(jl
->j_older_commits_done
), 1);
1160 up(&jl
->j_commit_lock
);
1162 put_journal_list(s
, jl
);
1165 reiserfs_abort(s
, retval
, "Journal write error in %s",
1172 ** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
1173 ** returns NULL if it can't find anything
1175 static struct reiserfs_journal_list
*find_newer_jl_for_cn(struct
1176 reiserfs_journal_cnode
1179 struct super_block
*sb
= cn
->sb
;
1180 b_blocknr_t blocknr
= cn
->blocknr
;
1184 if (cn
->sb
== sb
&& cn
->blocknr
== blocknr
&& cn
->jlist
) {
1192 static void remove_journal_hash(struct super_block
*,
1193 struct reiserfs_journal_cnode
**,
1194 struct reiserfs_journal_list
*, unsigned long,
1198 ** once all the real blocks have been flushed, it is safe to remove them from the
1199 ** journal list for this transaction. Aside from freeing the cnode, this also allows the
1200 ** block to be reallocated for data blocks if it had been deleted.
1202 static void remove_all_from_journal_list(struct super_block
*p_s_sb
,
1203 struct reiserfs_journal_list
*jl
,
1206 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
1207 struct reiserfs_journal_cnode
*cn
, *last
;
1208 cn
= jl
->j_realblock
;
1210 /* which is better, to lock once around the whole loop, or
1211 ** to lock for each call to remove_journal_hash?
1214 if (cn
->blocknr
!= 0) {
1216 reiserfs_warning(p_s_sb
,
1217 "block %u, bh is %d, state %ld",
1218 cn
->blocknr
, cn
->bh
? 1 : 0,
1222 remove_journal_hash(p_s_sb
, journal
->j_list_hash_table
,
1223 jl
, cn
->blocknr
, 1);
1227 free_cnode(p_s_sb
, last
);
1229 jl
->j_realblock
= NULL
;
1233 ** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
1234 ** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
1235 ** releasing blocks in this transaction for reuse as data blocks.
1236 ** called by flush_journal_list, before it calls remove_all_from_journal_list
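/* Sketch of what the helper below records in the on-disk header (taken from
** its own body): the id of the last fully flushed transaction and the offset
** of the first log block that still needs replay:
**
**   jh->j_last_flush_trans_id    = cpu_to_le32(trans_id);
**   jh->j_first_unflushed_offset = cpu_to_le32(offset);
**   jh->j_mount_id               = cpu_to_le32(journal->j_mount_id);
*/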
1239 static int _update_journal_header_block(struct super_block
*p_s_sb
,
1240 unsigned long offset
,
1241 unsigned long trans_id
)
1243 struct reiserfs_journal_header
*jh
;
1244 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
1246 if (reiserfs_is_journal_aborted(journal
))
1249 if (trans_id
>= journal
->j_last_flush_trans_id
) {
1250 if (buffer_locked((journal
->j_header_bh
))) {
1251 wait_on_buffer((journal
->j_header_bh
));
1252 if (unlikely(!buffer_uptodate(journal
->j_header_bh
))) {
1253 #ifdef CONFIG_REISERFS_CHECK
1254 reiserfs_warning(p_s_sb
,
1255 "journal-699: buffer write failed");
1260 journal
->j_last_flush_trans_id
= trans_id
;
1261 journal
->j_first_unflushed_offset
= offset
;
1262 jh
= (struct reiserfs_journal_header
*)(journal
->j_header_bh
->
1264 jh
->j_last_flush_trans_id
= cpu_to_le32(trans_id
);
1265 jh
->j_first_unflushed_offset
= cpu_to_le32(offset
);
1266 jh
->j_mount_id
= cpu_to_le32(journal
->j_mount_id
);
1268 if (reiserfs_barrier_flush(p_s_sb
)) {
1270 lock_buffer(journal
->j_header_bh
);
1271 ret
= submit_barrier_buffer(journal
->j_header_bh
);
1272 if (ret
== -EOPNOTSUPP
) {
1273 set_buffer_uptodate(journal
->j_header_bh
);
1274 disable_barrier(p_s_sb
);
1277 wait_on_buffer(journal
->j_header_bh
);
1278 check_barrier_completion(p_s_sb
, journal
->j_header_bh
);
1281 set_buffer_dirty(journal
->j_header_bh
);
1282 sync_dirty_buffer(journal
->j_header_bh
);
1284 if (!buffer_uptodate(journal
->j_header_bh
)) {
1285 reiserfs_warning(p_s_sb
,
1286 "journal-837: IO error during journal replay");
1293 static int update_journal_header_block(struct super_block
*p_s_sb
,
1294 unsigned long offset
,
1295 unsigned long trans_id
)
1297 return _update_journal_header_block(p_s_sb
, offset
, trans_id
);
1301 ** flush any and all journal lists older than you are
1302 ** can only be called from flush_journal_list
1304 static int flush_older_journal_lists(struct super_block
*p_s_sb
,
1305 struct reiserfs_journal_list
*jl
)
1307 struct list_head
*entry
;
1308 struct reiserfs_journal_list
*other_jl
;
1309 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
1310 unsigned long trans_id
= jl
->j_trans_id
;
1312 /* we know we are the only ones flushing things, no extra race
1313 * protection is required.
1316 entry
= journal
->j_journal_list
.next
;
1318 if (entry
== &journal
->j_journal_list
)
1320 other_jl
= JOURNAL_LIST_ENTRY(entry
);
1321 if (other_jl
->j_trans_id
< trans_id
) {
1322 BUG_ON(other_jl
->j_refcount
<= 0);
1323 /* do not flush all */
1324 flush_journal_list(p_s_sb
, other_jl
, 0);
1326 /* other_jl is now deleted from the list */
1332 static void del_from_work_list(struct super_block
*s
,
1333 struct reiserfs_journal_list
*jl
)
1335 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1336 if (!list_empty(&jl
->j_working_list
)) {
1337 list_del_init(&jl
->j_working_list
);
1338 journal
->j_num_work_lists
--;
1342 /* flush a journal list, both commit and real blocks
1344 ** always set flushall to 1, unless you are calling from inside
1345 ** flush_journal_list
1347 ** IMPORTANT. This can only be called while there are no journal writers,
1348 ** and the journal is locked. That means it can only be called from
1349 ** do_journal_end, or by journal_release
1351 static int flush_journal_list(struct super_block
*s
,
1352 struct reiserfs_journal_list
*jl
, int flushall
)
1354 struct reiserfs_journal_list
*pjl
;
1355 struct reiserfs_journal_cnode
*cn
, *last
;
1359 struct buffer_head
*saved_bh
;
1360 unsigned long j_len_saved
= jl
->j_len
;
1361 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1364 BUG_ON(j_len_saved
<= 0);
1366 if (atomic_read(&journal
->j_wcount
) != 0) {
1368 "clm-2048: flush_journal_list called with wcount %d",
1369 atomic_read(&journal
->j_wcount
));
1371 BUG_ON(jl
->j_trans_id
== 0);
1373 /* if flushall == 0, the lock is already held */
1375 down(&journal
->j_flush_sem
);
1376 } else if (!down_trylock(&journal
->j_flush_sem
)) {
1381 if (j_len_saved
> journal
->j_trans_max
) {
1383 "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
1384 j_len_saved
, jl
->j_trans_id
);
1390 /* if all the work is already done, get out of here */
1391 if (atomic_read(&(jl
->j_nonzerolen
)) <= 0 &&
1392 atomic_read(&(jl
->j_commit_left
)) <= 0) {
1393 goto flush_older_and_return
;
1396 /* start by putting the commit list on disk. This will also flush
** the commit lists of any older transactions
1399 flush_commit_list(s
, jl
, 1);
1401 if (!(jl
->j_state
& LIST_DIRTY
)
1402 && !reiserfs_is_journal_aborted(journal
))
1405 /* are we done now? */
1406 if (atomic_read(&(jl
->j_nonzerolen
)) <= 0 &&
1407 atomic_read(&(jl
->j_commit_left
)) <= 0) {
1408 goto flush_older_and_return
;
1411 /* loop through each cnode, see if we need to write it,
1412 ** or wait on a more recent transaction, or just ignore it
1414 if (atomic_read(&(journal
->j_wcount
)) != 0) {
1416 "journal-844: panic journal list is flushing, wcount is not 0\n");
1418 cn
= jl
->j_realblock
;
1423 /* blocknr of 0 is no longer in the hash, ignore it */
1424 if (cn
->blocknr
== 0) {
1428 /* This transaction failed commit. Don't write out to the disk */
1429 if (!(jl
->j_state
& LIST_DIRTY
))
1432 pjl
= find_newer_jl_for_cn(cn
);
/* the order is important here.  We check pjl to make sure we
** don't clear BH_JDirty_wait if we aren't the one writing this block to disk
1437 if (!pjl
&& cn
->bh
) {
1440 /* we do this to make sure nobody releases the buffer while
1441 ** we are working with it
1445 if (buffer_journal_dirty(saved_bh
)) {
1446 BUG_ON(!can_dirty(cn
));
1449 } else if (can_dirty(cn
)) {
1450 /* everything with !pjl && jwait should be writable */
1455 /* if someone has this block in a newer transaction, just make
** sure they are committed, and don't try writing it to disk
1459 if (atomic_read(&pjl
->j_commit_left
))
1460 flush_commit_list(s
, pjl
, 1);
1464 /* bh == NULL when the block got to disk on its own, OR,
1465 ** the block got freed in a future transaction
1467 if (saved_bh
== NULL
) {
1471 /* this should never happen. kupdate_one_transaction has this list
1472 ** locked while it works, so we should never see a buffer here that
1473 ** is not marked JDirty_wait
1475 if ((!was_jwait
) && !buffer_locked(saved_bh
)) {
1477 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
1478 "not in a newer tranasction",
1479 (unsigned long long)saved_bh
->
1480 b_blocknr
, was_dirty
? ' ' : '!',
1481 was_jwait
? ' ' : '!');
1484 /* we inc again because saved_bh gets decremented at free_cnode */
1486 set_bit(BLOCK_NEEDS_FLUSH
, &cn
->state
);
1487 lock_buffer(saved_bh
);
1488 BUG_ON(cn
->blocknr
!= saved_bh
->b_blocknr
);
1489 if (buffer_dirty(saved_bh
))
1490 submit_logged_buffer(saved_bh
);
1492 unlock_buffer(saved_bh
);
1496 "clm-2082: Unable to flush buffer %llu in %s",
1497 (unsigned long long)saved_bh
->
1498 b_blocknr
, __FUNCTION__
);
1504 /* we incremented this to keep others from taking the buffer head away */
1506 if (atomic_read(&(saved_bh
->b_count
)) < 0) {
1508 "journal-945: saved_bh->b_count < 0");
1513 cn
= jl
->j_realblock
;
1515 if (test_bit(BLOCK_NEEDS_FLUSH
, &cn
->state
)) {
1518 "journal-1011: cn->bh is NULL\n");
1520 wait_on_buffer(cn
->bh
);
1523 "journal-1012: cn->bh is NULL\n");
1525 if (unlikely(!buffer_uptodate(cn
->bh
))) {
1526 #ifdef CONFIG_REISERFS_CHECK
1528 "journal-949: buffer write failed\n");
1532 /* note, we must clear the JDirty_wait bit after the up to date
1533 ** check, otherwise we race against our flushpage routine
1535 BUG_ON(!test_clear_buffer_journal_dirty
1538 /* undo the inc from journal_mark_dirty */
1547 reiserfs_abort(s
, -EIO
,
1548 "Write error while pushing transaction to disk in %s",
1550 flush_older_and_return
:
1552 /* before we can update the journal header block, we _must_ flush all
1553 ** real blocks from all older transactions to disk. This is because
1554 ** once the header block is updated, this transaction will not be
1555 ** replayed after a crash
1558 flush_older_journal_lists(s
, jl
);
1561 err
= journal
->j_errno
;
1562 /* before we can remove everything from the hash tables for this
1563 ** transaction, we must make sure it can never be replayed
1565 ** since we are only called from do_journal_end, we know for sure there
1566 ** are no allocations going on while we are flushing journal lists. So,
1567 ** we only need to update the journal header block for the last list
1570 if (!err
&& flushall
) {
1572 update_journal_header_block(s
,
1573 (jl
->j_start
+ jl
->j_len
+
1574 2) % SB_ONDISK_JOURNAL_SIZE(s
),
1577 reiserfs_abort(s
, -EIO
,
1578 "Write error while updating journal header in %s",
1581 remove_all_from_journal_list(s
, jl
, 0);
1582 list_del_init(&jl
->j_list
);
1583 journal
->j_num_lists
--;
1584 del_from_work_list(s
, jl
);
1586 if (journal
->j_last_flush_id
!= 0 &&
1587 (jl
->j_trans_id
- journal
->j_last_flush_id
) != 1) {
1588 reiserfs_warning(s
, "clm-2201: last flush %lu, current %lu",
1589 journal
->j_last_flush_id
, jl
->j_trans_id
);
1591 journal
->j_last_flush_id
= jl
->j_trans_id
;
1593 /* not strictly required since we are freeing the list, but it should
1594 * help find code using dead lists later on
1597 atomic_set(&(jl
->j_nonzerolen
), 0);
1599 jl
->j_realblock
= NULL
;
1600 jl
->j_commit_bh
= NULL
;
1603 put_journal_list(s
, jl
);
1605 up(&journal
->j_flush_sem
);
1610 static int write_one_transaction(struct super_block
*s
,
1611 struct reiserfs_journal_list
*jl
,
1612 struct buffer_chunk
*chunk
)
1614 struct reiserfs_journal_cnode
*cn
;
1617 jl
->j_state
|= LIST_TOUCHED
;
1618 del_from_work_list(s
, jl
);
1619 if (jl
->j_len
== 0 || atomic_read(&jl
->j_nonzerolen
) == 0) {
1623 cn
= jl
->j_realblock
;
1625 /* if the blocknr == 0, this has been cleared from the hash,
1628 if (cn
->blocknr
== 0) {
1631 if (cn
->bh
&& can_dirty(cn
) && buffer_dirty(cn
->bh
)) {
1632 struct buffer_head
*tmp_bh
;
1633 /* we can race against journal_mark_freed when we try
1634 * to lock_buffer(cn->bh), so we have to inc the buffer
1635 * count, and recheck things after locking
1639 lock_buffer(tmp_bh
);
1640 if (cn
->bh
&& can_dirty(cn
) && buffer_dirty(tmp_bh
)) {
1641 if (!buffer_journal_dirty(tmp_bh
) ||
1642 buffer_journal_prepared(tmp_bh
))
1644 add_to_chunk(chunk
, tmp_bh
, NULL
, write_chunk
);
1647 /* note, cn->bh might be null now */
1648 unlock_buffer(tmp_bh
);
1659 /* used by flush_commit_list */
1660 static int dirty_one_transaction(struct super_block
*s
,
1661 struct reiserfs_journal_list
*jl
)
1663 struct reiserfs_journal_cnode
*cn
;
1664 struct reiserfs_journal_list
*pjl
;
1667 jl
->j_state
|= LIST_DIRTY
;
1668 cn
= jl
->j_realblock
;
1670 /* look for a more recent transaction that logged this
1671 ** buffer. Only the most recent transaction with a buffer in
1672 ** it is allowed to send that buffer to disk
1674 pjl
= find_newer_jl_for_cn(cn
);
1675 if (!pjl
&& cn
->blocknr
&& cn
->bh
1676 && buffer_journal_dirty(cn
->bh
)) {
1677 BUG_ON(!can_dirty(cn
));
1678 /* if the buffer is prepared, it will either be logged
1679 * or restored. If restored, we need to make sure
1680 * it actually gets marked dirty
1682 clear_buffer_journal_new(cn
->bh
);
1683 if (buffer_journal_prepared(cn
->bh
)) {
1684 set_buffer_journal_restore_dirty(cn
->bh
);
1686 set_buffer_journal_test(cn
->bh
);
1687 mark_buffer_dirty(cn
->bh
);
1695 static int kupdate_transactions(struct super_block
*s
,
1696 struct reiserfs_journal_list
*jl
,
1697 struct reiserfs_journal_list
**next_jl
,
1698 unsigned long *next_trans_id
,
1699 int num_blocks
, int num_trans
)
1703 int transactions_flushed
= 0;
1704 unsigned long orig_trans_id
= jl
->j_trans_id
;
1705 struct buffer_chunk chunk
;
1706 struct list_head
*entry
;
1707 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1710 down(&journal
->j_flush_sem
);
1711 if (!journal_list_still_alive(s
, orig_trans_id
)) {
1715 /* we've got j_flush_sem held, nobody is going to delete any
1716 * of these lists out from underneath us
1718 while ((num_trans
&& transactions_flushed
< num_trans
) ||
1719 (!num_trans
&& written
< num_blocks
)) {
1721 if (jl
->j_len
== 0 || (jl
->j_state
& LIST_TOUCHED
) ||
1722 atomic_read(&jl
->j_commit_left
)
1723 || !(jl
->j_state
& LIST_DIRTY
)) {
1724 del_from_work_list(s
, jl
);
1727 ret
= write_one_transaction(s
, jl
, &chunk
);
1731 transactions_flushed
++;
1733 entry
= jl
->j_list
.next
;
1736 if (entry
== &journal
->j_journal_list
) {
1739 jl
= JOURNAL_LIST_ENTRY(entry
);
1741 /* don't bother with older transactions */
1742 if (jl
->j_trans_id
<= orig_trans_id
)
1746 write_chunk(&chunk
);
1750 up(&journal
->j_flush_sem
);
/* O_SYNC and fsync heavy applications tend to use
** all the journal list slots with tiny transactions. These
1756 ** trigger lots and lots of calls to update the header block, which
1757 ** adds seeks and slows things down.
1759 ** This function tries to clear out a large chunk of the journal lists
1760 ** at once, which makes everything faster since only the newest journal
1761 ** list updates the header block
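/* Rough shape of the batching done below (a summary, not new behaviour):
** walk forward from jl adding up j_nonzerolen counts until 256 transactions
** or the block limit is reached, run kupdate_transactions() over that span,
** and then flush the last list in the span with flushall == 1 so a single
** header-block update covers the whole group. */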
1763 static int flush_used_journal_lists(struct super_block
*s
,
1764 struct reiserfs_journal_list
*jl
)
1766 unsigned long len
= 0;
1767 unsigned long cur_len
;
1771 struct reiserfs_journal_list
*tjl
;
1772 struct reiserfs_journal_list
*flush_jl
;
1773 unsigned long trans_id
;
1774 struct reiserfs_journal
*journal
= SB_JOURNAL(s
);
1776 flush_jl
= tjl
= jl
;
1778 /* in data logging mode, try harder to flush a lot of blocks */
1779 if (reiserfs_data_log(s
))
1781 /* flush for 256 transactions or limit blocks, whichever comes first */
1782 for (i
= 0; i
< 256 && len
< limit
; i
++) {
1783 if (atomic_read(&tjl
->j_commit_left
) ||
1784 tjl
->j_trans_id
< jl
->j_trans_id
) {
1787 cur_len
= atomic_read(&tjl
->j_nonzerolen
);
1789 tjl
->j_state
&= ~LIST_TOUCHED
;
1793 if (tjl
->j_list
.next
== &journal
->j_journal_list
)
1795 tjl
= JOURNAL_LIST_ENTRY(tjl
->j_list
.next
);
1797 /* try to find a group of blocks we can flush across all the
1798 ** transactions, but only bother if we've actually spanned
1799 ** across multiple lists
1801 if (flush_jl
!= jl
) {
1802 ret
= kupdate_transactions(s
, jl
, &tjl
, &trans_id
, len
, i
);
1804 flush_journal_list(s
, flush_jl
, 1);
1809 ** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
1812 void remove_journal_hash(struct super_block
*sb
,
1813 struct reiserfs_journal_cnode
**table
,
1814 struct reiserfs_journal_list
*jl
,
1815 unsigned long block
, int remove_freed
)
1817 struct reiserfs_journal_cnode
*cur
;
1818 struct reiserfs_journal_cnode
**head
;
1820 head
= &(journal_hash(table
, sb
, block
));
1826 if (cur
->blocknr
== block
&& cur
->sb
== sb
1827 && (jl
== NULL
|| jl
== cur
->jlist
)
1828 && (!test_bit(BLOCK_FREED
, &cur
->state
) || remove_freed
)) {
1830 cur
->hnext
->hprev
= cur
->hprev
;
1833 cur
->hprev
->hnext
= cur
->hnext
;
1840 if (cur
->bh
&& cur
->jlist
) /* anybody who clears the cur->bh will also dec the nonzerolen */
1841 atomic_dec(&(cur
->jlist
->j_nonzerolen
));
1849 static void free_journal_ram(struct super_block
*p_s_sb
)
1851 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
1852 kfree(journal
->j_current_jl
);
1853 journal
->j_num_lists
--;
1855 vfree(journal
->j_cnode_free_orig
);
1856 free_list_bitmaps(p_s_sb
, journal
->j_list_bitmap
);
1857 free_bitmap_nodes(p_s_sb
); /* must be after free_list_bitmaps */
1858 if (journal
->j_header_bh
) {
1859 brelse(journal
->j_header_bh
);
1861 /* j_header_bh is on the journal dev, make sure not to release the journal
1862 * dev until we brelse j_header_bh
1864 release_journal_dev(p_s_sb
, journal
);
1869 ** call on unmount. Only set error to 1 if you haven't made your way out
1870 ** of read_super() yet. Any other caller must keep error at 0.
1872 static int do_journal_release(struct reiserfs_transaction_handle
*th
,
1873 struct super_block
*p_s_sb
, int error
)
1875 struct reiserfs_transaction_handle myth
;
1877 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
1879 /* we only want to flush out transactions if we were called with error == 0
1881 if (!error
&& !(p_s_sb
->s_flags
& MS_RDONLY
)) {
1882 /* end the current trans */
1883 BUG_ON(!th
->t_trans_id
);
1884 do_journal_end(th
, p_s_sb
, 10, FLUSH_ALL
);
1886 /* make sure something gets logged to force our way into the flush code */
1887 if (!journal_join(&myth
, p_s_sb
, 1)) {
1888 reiserfs_prepare_for_journal(p_s_sb
,
1889 SB_BUFFER_WITH_SB(p_s_sb
),
1891 journal_mark_dirty(&myth
, p_s_sb
,
1892 SB_BUFFER_WITH_SB(p_s_sb
));
1893 do_journal_end(&myth
, p_s_sb
, 1, FLUSH_ALL
);
1898 /* this also catches errors during the do_journal_end above */
1899 if (!error
&& reiserfs_is_journal_aborted(journal
)) {
1900 memset(&myth
, 0, sizeof(myth
));
1901 if (!journal_join_abort(&myth
, p_s_sb
, 1)) {
1902 reiserfs_prepare_for_journal(p_s_sb
,
1903 SB_BUFFER_WITH_SB(p_s_sb
),
1905 journal_mark_dirty(&myth
, p_s_sb
,
1906 SB_BUFFER_WITH_SB(p_s_sb
));
1907 do_journal_end(&myth
, p_s_sb
, 1, FLUSH_ALL
);
1911 reiserfs_mounted_fs_count
--;
1912 /* wait for all commits to finish */
1913 cancel_delayed_work(&SB_JOURNAL(p_s_sb
)->j_work
);
1914 flush_workqueue(commit_wq
);
1915 if (!reiserfs_mounted_fs_count
) {
1916 destroy_workqueue(commit_wq
);
1920 free_journal_ram(p_s_sb
);
1926 ** call on unmount. flush all journal trans, release all alloc'd ram
1928 int journal_release(struct reiserfs_transaction_handle
*th
,
1929 struct super_block
*p_s_sb
)
1931 return do_journal_release(th
, p_s_sb
, 0);
1935 ** only call from an error condition inside reiserfs_read_super!
1937 int journal_release_error(struct reiserfs_transaction_handle
*th
,
1938 struct super_block
*p_s_sb
)
1940 return do_journal_release(th
, p_s_sb
, 1);
1943 /* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
1944 static int journal_compare_desc_commit(struct super_block
*p_s_sb
,
1945 struct reiserfs_journal_desc
*desc
,
1946 struct reiserfs_journal_commit
*commit
)
1948 if (get_commit_trans_id(commit
) != get_desc_trans_id(desc
) ||
1949 get_commit_trans_len(commit
) != get_desc_trans_len(desc
) ||
1950 get_commit_trans_len(commit
) > SB_JOURNAL(p_s_sb
)->j_trans_max
||
1951 get_commit_trans_len(commit
) <= 0) {
1957 /* returns 0 if it did not find a description block
1958 ** returns -1 if it found a corrupt commit block
1959 ** returns 1 if both desc and commit were valid
1961 static int journal_transaction_is_valid(struct super_block
*p_s_sb
,
1962 struct buffer_head
*d_bh
,
1963 unsigned long *oldest_invalid_trans_id
,
1964 unsigned long *newest_mount_id
)
1966 struct reiserfs_journal_desc
*desc
;
1967 struct reiserfs_journal_commit
*commit
;
1968 struct buffer_head
*c_bh
;
1969 unsigned long offset
;
1974 desc
= (struct reiserfs_journal_desc
*)d_bh
->b_data
;
1975 if (get_desc_trans_len(desc
) > 0
1976 && !memcmp(get_journal_desc_magic(d_bh
), JOURNAL_DESC_MAGIC
, 8)) {
1977 if (oldest_invalid_trans_id
&& *oldest_invalid_trans_id
1978 && get_desc_trans_id(desc
) > *oldest_invalid_trans_id
) {
1979 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
1980 "journal-986: transaction "
1981 "is valid returning because trans_id %d is greater than "
1982 "oldest_invalid %lu",
1983 get_desc_trans_id(desc
),
1984 *oldest_invalid_trans_id
);
1988 && *newest_mount_id
> get_desc_mount_id(desc
)) {
1989 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
1990 "journal-1087: transaction "
1991 "is valid returning because mount_id %d is less than "
1992 "newest_mount_id %lu",
1993 get_desc_mount_id(desc
),
1997 if (get_desc_trans_len(desc
) > SB_JOURNAL(p_s_sb
)->j_trans_max
) {
1998 reiserfs_warning(p_s_sb
,
1999 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
2000 get_desc_trans_len(desc
));
2003 offset
= d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
);
2005 /* ok, we have a journal description block, lets see if the transaction was valid */
2007 journal_bread(p_s_sb
,
2008 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
2009 ((offset
+ get_desc_trans_len(desc
) +
2010 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb
)));
2013 commit
= (struct reiserfs_journal_commit
*)c_bh
->b_data
;
2014 if (journal_compare_desc_commit(p_s_sb
, desc
, commit
)) {
2015 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
2016 "journal_transaction_is_valid, commit offset %ld had bad "
2017 "time %d or length %d",
2019 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
),
2020 get_commit_trans_id(commit
),
2021 get_commit_trans_len(commit
));
2023 if (oldest_invalid_trans_id
) {
2024 *oldest_invalid_trans_id
=
2025 get_desc_trans_id(desc
);
2026 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
2028 "transaction_is_valid setting oldest invalid trans_id "
2030 get_desc_trans_id(desc
));
2035 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
2036 "journal-1006: found valid "
2037 "transaction start offset %llu, len %d id %d",
2039 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
),
2040 get_desc_trans_len(desc
),
2041 get_desc_trans_id(desc
));
2048 static void brelse_array(struct buffer_head
**heads
, int num
)
2051 for (i
= 0; i
< num
; i
++) {
2057 ** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the transaction
2059 ** is invalid, or too old.
2061 static int journal_read_transaction(struct super_block
*p_s_sb
,
2062 unsigned long cur_dblock
,
2063 unsigned long oldest_start
,
2064 unsigned long oldest_trans_id
,
2065 unsigned long newest_mount_id
)
2067 struct reiserfs_journal
*journal
= SB_JOURNAL(p_s_sb
);
2068 struct reiserfs_journal_desc
*desc
;
2069 struct reiserfs_journal_commit
*commit
;
2070 unsigned long trans_id
= 0;
2071 struct buffer_head
*c_bh
;
2072 struct buffer_head
*d_bh
;
2073 struct buffer_head
**log_blocks
= NULL
;
2074 struct buffer_head
**real_blocks
= NULL
;
2075 unsigned long trans_offset
;
2079 d_bh
= journal_bread(p_s_sb
, cur_dblock
);
2082 desc
= (struct reiserfs_journal_desc
*)d_bh
->b_data
;
2083 trans_offset
= d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
);
2084 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
, "journal-1037: "
2085 "journal_read_transaction, offset %llu, len %d mount_id %d",
2086 d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
),
2087 get_desc_trans_len(desc
), get_desc_mount_id(desc
));
2088 if (get_desc_trans_id(desc
) < oldest_trans_id
) {
2089 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
, "journal-1039: "
2090 "journal_read_trans skipping because %lu is too old",
2092 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
));
2096 if (get_desc_mount_id(desc
) != newest_mount_id
) {
2097 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
, "journal-1146: "
2098 "journal_read_trans skipping because %d is != "
2099 "newest_mount_id %lu", get_desc_mount_id(desc
),
2104 c_bh
= journal_bread(p_s_sb
, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
2105 ((trans_offset
+ get_desc_trans_len(desc
) + 1) %
2106 SB_ONDISK_JOURNAL_SIZE(p_s_sb
)));
2111 commit
= (struct reiserfs_journal_commit
*)c_bh
->b_data
;
2112 if (journal_compare_desc_commit(p_s_sb
, desc
, commit
)) {
2113 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
2114 "journal_read_transaction, "
2115 "commit offset %llu had bad time %d or length %d",
2117 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
),
2118 get_commit_trans_id(commit
),
2119 get_commit_trans_len(commit
));
2124 trans_id
= get_desc_trans_id(desc
);
2125 /* now we know we've got a good transaction, and it was inside the valid time ranges */
2126 log_blocks
= kmalloc(get_desc_trans_len(desc
) *
2127 sizeof(struct buffer_head
*), GFP_NOFS
);
2128 real_blocks
= kmalloc(get_desc_trans_len(desc
) *
2129 sizeof(struct buffer_head
*), GFP_NOFS
);
2130 if (!log_blocks
|| !real_blocks
) {
2135 reiserfs_warning(p_s_sb
,
2136 "journal-1169: kmalloc failed, unable to mount FS");
2139 /* get all the buffer heads */
2140 trans_half
= journal_trans_half(p_s_sb
->s_blocksize
);
2141 for (i
= 0; i
< get_desc_trans_len(desc
); i
++) {
2143 journal_getblk(p_s_sb
,
2144 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
2146 i
) % SB_ONDISK_JOURNAL_SIZE(p_s_sb
));
2147 if (i
< trans_half
) {
2150 le32_to_cpu(desc
->j_realblock
[i
]));
2154 le32_to_cpu(commit
->
2155 j_realblock
[i
- trans_half
]));
2157 if (real_blocks
[i
]->b_blocknr
> SB_BLOCK_COUNT(p_s_sb
)) {
2158 reiserfs_warning(p_s_sb
,
2159 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2162 /* make sure we don't try to replay onto log or reserved area */
2163 if (is_block_in_log_or_reserved_area
2164 (p_s_sb
, real_blocks
[i
]->b_blocknr
)) {
2165 reiserfs_warning(p_s_sb
,
2166 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2168 brelse_array(log_blocks
, i
);
2169 brelse_array(real_blocks
, i
);
2177 /* read in the log blocks, memcpy to the corresponding real block */
2178 ll_rw_block(READ
, get_desc_trans_len(desc
), log_blocks
);
2179 for (i
= 0; i
< get_desc_trans_len(desc
); i
++) {
2180 wait_on_buffer(log_blocks
[i
]);
2181 if (!buffer_uptodate(log_blocks
[i
])) {
2182 reiserfs_warning(p_s_sb
,
2183 "journal-1212: REPLAY FAILURE fsck required! buffer write failed");
2184 brelse_array(log_blocks
+ i
,
2185 get_desc_trans_len(desc
) - i
);
2186 brelse_array(real_blocks
, get_desc_trans_len(desc
));
2193 memcpy(real_blocks
[i
]->b_data
, log_blocks
[i
]->b_data
,
2194 real_blocks
[i
]->b_size
);
2195 set_buffer_uptodate(real_blocks
[i
]);
2196 brelse(log_blocks
[i
]);
2198 /* flush out the real blocks */
2199 for (i
= 0; i
< get_desc_trans_len(desc
); i
++) {
2200 set_buffer_dirty(real_blocks
[i
]);
2201 ll_rw_block(SWRITE
, 1, real_blocks
+ i
);
2203 for (i
= 0; i
< get_desc_trans_len(desc
); i
++) {
2204 wait_on_buffer(real_blocks
[i
]);
2205 if (!buffer_uptodate(real_blocks
[i
])) {
2206 reiserfs_warning(p_s_sb
,
2207 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2208 brelse_array(real_blocks
+ i
,
2209 get_desc_trans_len(desc
) - i
);
2216 brelse(real_blocks
[i
]);
2219 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
2220 ((trans_offset
+ get_desc_trans_len(desc
) +
2221 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb
));
2222 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
,
2223 "journal-1095: setting journal " "start to offset %ld",
2224 cur_dblock
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
));
2226 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2227 journal
->j_start
= cur_dblock
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
);
2228 journal
->j_last_flush_trans_id
= trans_id
;
2229 journal
->j_trans_id
= trans_id
+ 1;
2230 /* check for trans_id overflow */
2231 if (journal
->j_trans_id
== 0)
2232 journal
->j_trans_id
= 10;
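
/*
 * Illustrative sketch (not part of the original source): the replay code
 * above treats a transaction as a description block, followed by
 * get_desc_trans_len() data blocks, followed by a commit block, with every
 * offset wrapping modulo the on-disk journal size.  The helper below is
 * hypothetical and only spells out that arithmetic; it is not used anywhere
 * in this file.
 */
#if 0
static unsigned long example_trans_block(struct super_block *sb,
					 unsigned long trans_offset,
					 unsigned long index)
{
	/* index 0 is the desc block, 1..len are data blocks, len + 1 is the commit block */
	return SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((trans_offset + index) % SB_ONDISK_JOURNAL_SIZE(sb));
}
#endif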
/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
					   int bufsize, unsigned int max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return (bh);

	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	ll_rw_block(READ, j, bhlist);
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_desc *desc;
	unsigned long oldest_trans_id = 0;
	unsigned long oldest_invalid_trans_id = 0;
	time_t start;
	unsigned long oldest_start = 0;
	unsigned long cur_dblock = 0;
	unsigned long newest_mount_id = 9;
	struct buffer_head *d_bh;
	struct reiserfs_journal_header *jh;
	int valid_journal_header = 0;
	int replay_count = 0;
	int continue_replay = 1;
	int ret;
	char b[BDEVNAME_SIZE];

	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
	reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
		      bdevname(journal->j_dev_bd, b));
	start = get_seconds();

	/* step 1, read in the journal header block.  Check the transaction it says
	 ** is the first unflushed, and if that transaction is not valid,
	 ** replay is done
	 */
	journal->j_header_bh = journal_bread(p_s_sb,
					     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
					     + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
	if (!journal->j_header_bh) {
		return 1;
	}
	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
	if (le32_to_cpu(jh->j_first_unflushed_offset) <
	    SB_ONDISK_JOURNAL_SIZE(p_s_sb)
	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
		oldest_start =
		    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
		    le32_to_cpu(jh->j_first_unflushed_offset);
		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		newest_mount_id = le32_to_cpu(jh->j_mount_id);
		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
			       "journal-1153: found in "
			       "header: first_unflushed_offset %d, last_flushed_trans_id "
			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
			       le32_to_cpu(jh->j_last_flush_trans_id));
		valid_journal_header = 1;

		/* now, we try to read the first unflushed offset.  If it is not valid,
		 ** there is nothing more we can do, and it makes no sense to read
		 ** through the whole log.
		 */
		d_bh =
		    journal_bread(p_s_sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
				  le32_to_cpu(jh->j_first_unflushed_offset));
		ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
		if (!ret) {
			continue_replay = 0;
		}
		brelse(d_bh);
		goto start_log_replay;
	}

	if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
		reiserfs_warning(p_s_sb,
				 "clm-2076: device is readonly, unable to replay log");
		return -1;
	}

	/* ok, there are transactions that need to be replayed.  start with the first log block, find
	 ** all the valid transactions, and pick out the oldest.
	 */
	while (continue_replay
	       && cur_dblock <
	       (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
		SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
		/* Note that it is required for blocksize of primary fs device and journal
		   device to be the same */
		d_bh =
		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
				    p_s_sb->s_blocksize,
				    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
				    SB_ONDISK_JOURNAL_SIZE(p_s_sb));
		ret =
		    journal_transaction_is_valid(p_s_sb, d_bh,
						 &oldest_invalid_trans_id,
						 &newest_mount_id);
		if (ret == 1) {
			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
			if (oldest_start == 0) {	/* init all oldest_ values */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
					       "journal-1179: Setting "
					       "oldest_start to offset %llu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (p_s_sb), oldest_trans_id);
			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
				/* one we just read was older */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
					       "journal-1180: Resetting "
					       "oldest_start to offset %lu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (p_s_sb), oldest_trans_id);
			}
			if (newest_mount_id < get_desc_mount_id(desc)) {
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
					       "journal-1299: Setting "
					       "newest_mount_id to %d",
					       get_desc_mount_id(desc));
			}
			cur_dblock += get_desc_trans_len(desc) + 2;
		} else {
			cur_dblock++;
		}
		brelse(d_bh);
	}

      start_log_replay:
	cur_dblock = oldest_start;
	if (oldest_trans_id) {
		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
			       "journal-1206: Starting replay "
			       "from offset %llu, trans_id %lu",
			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
			       oldest_trans_id);
	}
	replay_count = 0;
	while (continue_replay && oldest_trans_id > 0) {
		ret =
		    journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
					     oldest_trans_id, newest_mount_id);
		if (ret < 0) {
			return ret;
		} else if (ret != 0) {
			break;
		}
		cur_dblock =
		    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
		replay_count++;
		if (cur_dblock == oldest_start)
			break;
	}

	if (oldest_trans_id == 0) {
		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
			       "journal-1225: No valid transactions found");
	}
	/* j_start does not get set correctly if we don't replay any transactions.
	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
	 ** copy the trans_id from the header
	 */
	if (valid_journal_header && replay_count == 0) {
		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
		journal->j_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		/* check for trans_id overflow */
		if (journal->j_trans_id == 0)
			journal->j_trans_id = 10;
		journal->j_last_flush_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id);
		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
	} else {
		journal->j_mount_id = newest_mount_id + 1;
	}
	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
		       "newest_mount_id to %lu", journal->j_mount_id);
	journal->j_first_unflushed_offset = journal->j_start;
	if (replay_count > 0) {
		reiserfs_info(p_s_sb,
			      "replayed %d transactions in %lu seconds\n",
			      replay_count, get_seconds() - start);
	}
	if (!bdev_read_only(p_s_sb->s_bdev) &&
	    _update_journal_header_block(p_s_sb, journal->j_start,
					 journal->j_last_flush_trans_id)) {
		/* replay failed, caller must call free_journal_ram and abort
		 ** the mount
		 */
		return -1;
	}
	return 0;
}
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
	struct reiserfs_journal_list *jl;
	jl = kzalloc(sizeof(struct reiserfs_journal_list),
		     GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&jl->j_list);
	INIT_LIST_HEAD(&jl->j_working_list);
	INIT_LIST_HEAD(&jl->j_tail_bh_list);
	INIT_LIST_HEAD(&jl->j_bh_list);
	sema_init(&jl->j_commit_lock, 1);
	SB_JOURNAL(s)->j_num_lists++;
	get_journal_list(jl);
	return jl;
}

static void journal_list_init(struct super_block *p_s_sb)
{
	SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
}
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal)
{
	int result;

	result = 0;

	if (journal->j_dev_file != NULL) {
		result = filp_close(journal->j_dev_file, NULL);
		journal->j_dev_file = NULL;
		journal->j_dev_bd = NULL;
	} else if (journal->j_dev_bd != NULL) {
		result = blkdev_put(journal->j_dev_bd);
		journal->j_dev_bd = NULL;
	}

	if (result != 0) {
		reiserfs_warning(super,
				 "sh-457: release_journal_dev: Cannot release journal device: %i",
				 result);
	}
	return result;
}
static int journal_init_dev(struct super_block *super,
			    struct reiserfs_journal *journal,
			    const char *jdev_name)
{
	int result;
	dev_t jdev;
	int blkdev_mode = FMODE_READ | FMODE_WRITE;
	char b[BDEVNAME_SIZE];

	result = 0;

	journal->j_dev_bd = NULL;
	journal->j_dev_file = NULL;
	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

	if (bdev_read_only(super->s_bdev))
		blkdev_mode = FMODE_READ;

	/* there is no "jdev" option and journal is on separate device */
	if ((!jdev_name || !jdev_name[0])) {
		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
		if (IS_ERR(journal->j_dev_bd)) {
			result = PTR_ERR(journal->j_dev_bd);
			journal->j_dev_bd = NULL;
			reiserfs_warning(super, "sh-458: journal_init_dev: "
					 "cannot init journal device '%s': %i",
					 __bdevname(jdev, b), result);
			return result;
		} else if (jdev != super->s_dev)
			set_blocksize(journal->j_dev_bd, super->s_blocksize);
		return 0;
	}

	journal->j_dev_file = filp_open(jdev_name, 0, 0);
	if (!IS_ERR(journal->j_dev_file)) {
		struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
		if (!S_ISBLK(jdev_inode->i_mode)) {
			reiserfs_warning(super, "journal_init_dev: '%s' is "
					 "not a block device", jdev_name);
			result = -ENOTBLK;
			release_journal_dev(super, journal);
		} else {
			/* ok */
			journal->j_dev_bd = I_BDEV(jdev_inode);
			set_blocksize(journal->j_dev_bd, super->s_blocksize);
			reiserfs_info(super,
				      "journal_init_dev: journal device: %s\n",
				      bdevname(journal->j_dev_bd, b));
		}
	} else {
		result = PTR_ERR(journal->j_dev_file);
		journal->j_dev_file = NULL;
		reiserfs_warning(super,
				 "journal_init_dev: Cannot open '%s': %i",
				 jdev_name, result);
	}
	return result;
}
/*
** must be called once on fs mount.  calls journal_read for you
*/
int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
		 int old_format, unsigned int commit_max_age)
{
	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
	struct buffer_head *bhjh;
	struct reiserfs_super_block *rs;
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal;
	struct reiserfs_journal_list *jl;
	char b[BDEVNAME_SIZE];

	journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
	if (!journal) {
		reiserfs_warning(p_s_sb,
				 "journal-1256: unable to get memory for journal structure");
		return 1;
	}
	memset(journal, 0, sizeof(struct reiserfs_journal));
	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
	INIT_LIST_HEAD(&journal->j_prealloc_list);
	INIT_LIST_HEAD(&journal->j_working_list);
	INIT_LIST_HEAD(&journal->j_journal_list);
	journal->j_persistent_trans = 0;
	if (reiserfs_allocate_list_bitmaps(p_s_sb,
					   journal->j_list_bitmap,
					   SB_BMAP_NR(p_s_sb)))
		goto free_and_return;
	allocate_bitmap_nodes(p_s_sb);

	/* reserved for journal area support */
	SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
						 REISERFS_OLD_DISK_OFFSET_IN_BYTES
						 / p_s_sb->s_blocksize +
						 SB_BMAP_NR(p_s_sb) + 1 :
						 REISERFS_DISK_OFFSET_IN_BYTES /
						 p_s_sb->s_blocksize + 2);

	/* Sanity check to see if the standard journal fits within the first
	   bitmap (relevant for small blocksizes) */
	if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
	    (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
	     SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
		reiserfs_warning(p_s_sb,
				 "journal-1393: journal does not fit for area "
				 "addressed by first of bitmap blocks. It starts at "
				 "%u and its size is %u. Block size %ld",
				 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
				 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
				 p_s_sb->s_blocksize);
		goto free_and_return;
	}

	if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
		reiserfs_warning(p_s_sb,
				 "sh-462: unable to initialize journal device");
		goto free_and_return;
	}

	rs = SB_DISK_SUPER_BLOCK(p_s_sb);

	/* read journal header */
	bhjh = journal_bread(p_s_sb,
			     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			     SB_ONDISK_JOURNAL_SIZE(p_s_sb));
	if (!bhjh) {
		reiserfs_warning(p_s_sb,
				 "sh-459: unable to read journal header");
		goto free_and_return;
	}
	jh = (struct reiserfs_journal_header *)(bhjh->b_data);

	/* make sure that journal matches to the super block */
	if (is_reiserfs_jr(rs)
	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
		sb_jp_journal_magic(rs))) {
		reiserfs_warning(p_s_sb,
				 "sh-460: journal header magic %x "
				 "(device %s) does not match to magic found in super "
				 "block %x", jh->jh_journal.jp_journal_magic,
				 bdevname(journal->j_dev_bd, b),
				 sb_jp_journal_magic(rs));
		brelse(bhjh);
		goto free_and_return;
	}

	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
	journal->j_max_commit_age =
	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;

	if (journal->j_trans_max) {
		/* make sure these parameters are available, assign them if they are not */
		__u32 initial = journal->j_trans_max;
		__u32 ratio = 1;

		if (p_s_sb->s_blocksize < 4096)
			ratio = 4096 / p_s_sb->s_blocksize;

		if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
		    JOURNAL_MIN_RATIO)
			journal->j_trans_max =
			    SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
			journal->j_trans_max =
			    JOURNAL_TRANS_MAX_DEFAULT / ratio;
		if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
			journal->j_trans_max =
			    JOURNAL_TRANS_MIN_DEFAULT / ratio;

		if (journal->j_trans_max != initial)
			reiserfs_warning(p_s_sb,
					 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
					 initial, journal->j_trans_max);

		journal->j_max_batch = journal->j_trans_max *
		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
	}

	if (!journal->j_trans_max) {
		/* the file system was created by an old version of mkreiserfs,
		   so this field contains a zero value */
		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;

		/* for blocksize >= 4096 - max transaction size is 1024.  For block size < 4096
		   trans max size is decreased proportionally */
		if (p_s_sb->s_blocksize < 4096) {
			journal->j_trans_max /= (4096 / p_s_sb->s_blocksize);
			journal->j_max_batch = (journal->j_trans_max) * 9 / 10;
		}
	}

	journal->j_default_max_commit_age = journal->j_max_commit_age;

	if (commit_max_age != 0) {
		journal->j_max_commit_age = commit_max_age;
		journal->j_max_trans_age = commit_max_age;
	}

	reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
		      "journal first block %u, max trans len %u, max batch %u, "
		      "max commit age %u, max trans age %u\n",
		      bdevname(journal->j_dev_bd, b),
		      SB_ONDISK_JOURNAL_SIZE(p_s_sb),
		      SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
		      journal->j_trans_max,
		      journal->j_max_batch,
		      journal->j_max_commit_age, journal->j_max_trans_age);

	brelse(bhjh);

	journal->j_list_bitmap_index = 0;
	journal_list_init(p_s_sb);

	memset(journal->j_list_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));

	INIT_LIST_HEAD(&journal->j_dirty_buffers);
	spin_lock_init(&journal->j_dirty_buffers_lock);

	journal->j_start = 0;
	journal->j_len = 0;
	journal->j_len_alloc = 0;
	atomic_set(&(journal->j_wcount), 0);
	atomic_set(&(journal->j_async_throttle), 0);
	journal->j_bcount = 0;
	journal->j_trans_start_time = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	init_waitqueue_head(&(journal->j_join_wait));
	sema_init(&journal->j_lock, 1);
	sema_init(&journal->j_flush_sem, 1);

	journal->j_trans_id = 10;
	journal->j_mount_id = 10;
	journal->j_state = 0;
	atomic_set(&(journal->j_jlock), 0);
	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
	journal->j_cnode_free_orig = journal->j_cnode_free_list;
	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
	journal->j_cnode_used = 0;
	journal->j_must_wait = 0;

	if (journal->j_cnode_free == 0) {
		reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
				 "allocation failed (%ld bytes). Journal is "
				 "too large for available memory. Usually "
				 "this is due to a journal that is too large.",
				 sizeof(struct reiserfs_journal_cnode) * num_cnodes);
		goto free_and_return;
	}

	init_journal_hash(p_s_sb);
	jl = journal->j_current_jl;
	jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
	if (!jl->j_list_bitmap) {
		reiserfs_warning(p_s_sb,
				 "journal-2005, get_list_bitmap failed for journal list 0");
		goto free_and_return;
	}
	if (journal_read(p_s_sb) < 0) {
		reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
		goto free_and_return;
	}

	reiserfs_mounted_fs_count++;
	if (reiserfs_mounted_fs_count <= 1)
		commit_wq = create_workqueue("reiserfs");

	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
	return 0;
      free_and_return:
	free_journal_ram(p_s_sb);
	return 1;
}
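
/*
 * Worked example (illustrative only, not part of the original source): with a
 * 1KB blocksize the ratio used above is 4096/1024 = 4, so a journal that was
 * formatted with the 4KB defaults gets its transaction size clamped to
 * JOURNAL_TRANS_MAX_DEFAULT/4, and j_max_batch is scaled by the same factor.
 */
#if 0
static void example_scale_trans_max(struct reiserfs_journal *journal,
				    int blocksize)
{
	__u32 ratio = 1;

	if (blocksize < 4096)
		ratio = 4096 / blocksize;	/* e.g. 4 for 1KB blocks */
	journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
	journal->j_max_batch = journal->j_trans_max *
	    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
}
#endif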
/*
** test for a polite end of the current transaction.  Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a single
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
				   int new_alloc)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	time_t now = get_seconds();
	/* cannot restart while nested */
	BUG_ON(!th->t_trans_id);
	if (th->t_refcount > 1)
		return 0;
	if (journal->j_must_wait > 0 ||
	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
	    atomic_read(&(journal->j_jlock)) ||
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
		return 1;
	}
	/* protected by the BKL here */
	journal->j_len_alloc += new_alloc;
	th->t_blocks_allocated += new_alloc;
	return 0;
}
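
/*
 * Illustrative sketch (not part of the original source): the intended calling
 * pattern for journal_transaction_should_end().  A long-running writer polls
 * it between units of work and, when it returns nonzero, ends the current
 * handle and begins a fresh one.  reiserfs_example_do_unit() is a made-up
 * placeholder for the caller's real work.
 */
#if 0
static int example_long_operation(struct reiserfs_transaction_handle *th,
				  struct super_block *sb, int units)
{
	int err;

	while (units--) {
		if (journal_transaction_should_end(th, JOURNAL_PER_BALANCE_CNT)) {
			/* politely restart so other writers can commit */
			err = journal_end(th, sb, JOURNAL_PER_BALANCE_CNT);
			if (err)
				return err;
			err = journal_begin(th, sb, JOURNAL_PER_BALANCE_CNT);
			if (err)
				return err;
		}
		reiserfs_example_do_unit(th, sb);	/* hypothetical helper */
	}
	return 0;
}
#endif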
/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	BUG_ON(!th->t_trans_id);
	journal->j_must_wait = 1;
	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
	return;
}
/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_allow_writes(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
	wake_up(&journal->j_join_wait);
}
/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_wait_on_write_block(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	wait_event(journal->j_join_wait,
		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
}
static void queue_log_writer(struct super_block *s)
{
	wait_queue_t wait;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	set_bit(J_WRITERS_QUEUED, &journal->j_state);

	/*
	 * we don't want to use wait_event here because
	 * we only want to wait once.
	 */
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&journal->j_join_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
		schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(&journal->j_join_wait, &wait);
}
static void wake_queued_writers(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
		wake_up(&journal->j_join_wait);
}
static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned long bcount = journal->j_bcount;
	while (1) {
		schedule_timeout_uninterruptible(1);
		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
		while ((atomic_read(&journal->j_wcount) > 0 ||
			atomic_read(&journal->j_jlock)) &&
		       journal->j_trans_id == trans_id) {
			queue_log_writer(sb);
		}
		if (journal->j_trans_id != trans_id)
			break;
		if (bcount == journal->j_bcount)
			break;
		bcount = journal->j_bcount;
	}
}
/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable.  send the number of blocks you
** expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb, unsigned long nblocks,
			      int join)
{
	time_t now = get_seconds();
	unsigned long old_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_transaction_handle myth;
	int sched_count = 0;
	int retval;

	reiserfs_check_lock_depth(p_s_sb, "journal_begin");
	if (nblocks > journal->j_trans_max)
		BUG();

	PROC_INFO_INC(p_s_sb, journal.journal_being);
	/* set here for journal_join */
	th->t_refcount = 1;
	th->t_super = p_s_sb;

      relock:
	lock_journal(p_s_sb);
	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
		unlock_journal(p_s_sb);
		retval = journal->j_errno;
		goto out_fail;
	}
	journal->j_bcount++;

	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
		unlock_journal(p_s_sb);
		reiserfs_wait_on_write_block(p_s_sb);
		PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
		goto relock;
	}
	now = get_seconds();

	/* if there is no room in the journal OR
	 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
	 ** we don't sleep if there aren't other writers
	 */

	if ((!join && journal->j_must_wait > 0) ||
	    (!join
	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
	    || (!join && atomic_read(&journal->j_wcount) > 0
		&& journal->j_trans_start_time > 0
		&& (now - journal->j_trans_start_time) >
		journal->j_max_trans_age) || (!join
					      && atomic_read(&journal->j_jlock))
	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

		old_trans_id = journal->j_trans_id;
		unlock_journal(p_s_sb);	/* allow others to finish this transaction */

		if (!join && (journal->j_len_alloc + nblocks + 2) >=
		    journal->j_max_batch &&
		    ((journal->j_len + nblocks + 2) * 100) <
		    (journal->j_len_alloc * 75)) {
			if (atomic_read(&journal->j_wcount) > 10) {
				sched_count++;
				queue_log_writer(p_s_sb);
				goto relock;
			}
		}
		/* don't mess with joining the transaction if all we have to do is
		 * wait for someone else to do a commit
		 */
		if (atomic_read(&journal->j_jlock)) {
			while (journal->j_trans_id == old_trans_id &&
			       atomic_read(&journal->j_jlock)) {
				queue_log_writer(p_s_sb);
			}
			goto relock;
		}
		retval = journal_join(&myth, p_s_sb, 1);
		if (retval)
			goto out_fail;

		/* someone might have ended the transaction while we joined */
		if (old_trans_id != journal->j_trans_id) {
			retval = do_journal_end(&myth, p_s_sb, 1, 0);
		} else {
			retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
		}

		if (retval)
			goto out_fail;

		PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
		goto relock;
	}
	/* we are the first writer, set trans_id */
	if (journal->j_trans_start_time == 0) {
		journal->j_trans_start_time = get_seconds();
	}
	atomic_inc(&(journal->j_wcount));
	journal->j_len_alloc += nblocks;
	th->t_blocks_logged = 0;
	th->t_blocks_allocated = nblocks;
	th->t_trans_id = journal->j_trans_id;
	unlock_journal(p_s_sb);
	INIT_LIST_HEAD(&th->t_list);
	return 0;

      out_fail:
	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = p_s_sb;
	return retval;
}
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
								    super_block
								    *s,
								    int nblocks)
{
	int ret;
	struct reiserfs_transaction_handle *th;

	/* if we're nesting into an existing transaction.  It will be
	 ** persistent on its own
	 */
	if (reiserfs_transaction_running(s)) {
		th = current->journal_info;
		th->t_refcount++;
		if (th->t_refcount < 2) {
			BUG();
		}
		return th;
	}
	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
	if (!th)
		return NULL;
	ret = journal_begin(th, s, nblocks);
	if (ret) {
		kfree(th);
		return NULL;
	}

	SB_JOURNAL(s)->j_persistent_trans++;
	return th;
}
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	int ret = 0;

	if (th->t_trans_id)
		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
	else
		ret = -EIO;
	if (th->t_refcount == 0) {
		SB_JOURNAL(s)->j_persistent_trans--;
		kfree(th);
	}
	return ret;
}
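
/*
 * Illustrative sketch (not part of the original source): the usual pairing of
 * reiserfs_persistent_transaction() with reiserfs_end_persistent_transaction().
 * The handle survives across calls because it is heap allocated (or nests
 * into an already-running transaction), which is what "persistent" means here.
 */
#if 0
static int example_persistent_update(struct super_block *sb)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(sb, 1);
	if (!th)
		return -ENOMEM;
	/* ... prepare and journal_mark_dirty() whatever needs logging ... */
	return reiserfs_end_persistent_transaction(th);
}
#endif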
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *p_s_sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 ** pointer
	 */
	th->t_handle_save = cur_th;
	if (cur_th && cur_th->t_refcount > 1) {
		BUG();
	}
	return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
}
int journal_join_abort(struct reiserfs_transaction_handle *th,
		       struct super_block *p_s_sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 ** pointer
	 */
	th->t_handle_save = cur_th;
	if (cur_th && cur_th->t_refcount > 1) {
		BUG();
	}
	return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
}
int journal_begin(struct reiserfs_transaction_handle *th,
		  struct super_block *p_s_sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;
	int ret;

	th->t_handle_save = NULL;
	if (cur_th) {
		/* we are nesting into the current transaction */
		if (cur_th->t_super == p_s_sb) {
			BUG_ON(!cur_th->t_refcount);
			cur_th->t_refcount++;
			memcpy(th, cur_th, sizeof(*th));
			if (th->t_refcount <= 1)
				reiserfs_warning(p_s_sb,
						 "BAD: refcount <= 1, but journal_info != 0");
			return 0;
		} else {
			/* we've ended up with a handle from a different filesystem.
			 ** save it and restore on journal_end.  This should never
			 ** really happen...
			 */
			reiserfs_warning(p_s_sb,
					 "clm-2100: nesting into a different FS");
			th->t_handle_save = current->journal_info;
			current->journal_info = th;
		}
	} else {
		current->journal_info = th;
	}
	ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
	if (current->journal_info != th)
		BUG();

	/* I guess this boils down to being the reciprocal of clm-2100 above.
	 * If do_journal_begin_r fails, we need to put it back, since journal_end
	 * won't be called to do it. */
	if (ret)
		current->journal_info = th->t_handle_save;
	else
		BUG_ON(!th->t_refcount);

	return ret;
}
/*
** puts bh into the current transaction.  If it was already there, reorder removes the
** old pointers from the hash, and puts new ones in (to make sure replay happens in the right order).
**
** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
** transaction is committed.
**
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct super_block *p_s_sb, struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn = NULL;
	int count_already_incd = 0;
	int prepared = 0;
	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(p_s_sb, journal.mark_dirty);
	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super,
			       "journal-1577: handle trans id %ld != current trans id %ld\n",
			       th->t_trans_id, journal->j_trans_id);
	}

	p_s_sb->s_dirt = 1;

	prepared = test_clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
	/* already in this transaction, we are done */
	if (buffer_journaled(bh)) {
		PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
		return 0;
	}

	/* this must be turned into a panic instead of a warning.  We can't allow
	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
	 ** could get to disk too early.  NOT GOOD.
	 */
	if (!prepared || buffer_dirty(bh)) {
		reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
				 (unsigned long long)bh->b_blocknr,
				 prepared ? ' ' : '!',
				 buffer_locked(bh) ? ' ' : '!',
				 buffer_dirty(bh) ? ' ' : '!',
				 buffer_journal_dirty(bh) ? ' ' : '!');
	}

	if (atomic_read(&(journal->j_wcount)) <= 0) {
		reiserfs_warning(p_s_sb,
				 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
				 atomic_read(&(journal->j_wcount)));
		return 1;
	}
	/* this error means I've screwed up, and we've overflowed the transaction.
	 ** Nothing can be done here, except make the FS readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
		reiserfs_panic(th->t_super,
			       "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
			       journal->j_len);
	}

	if (buffer_journal_dirty(bh)) {
		count_already_incd = 1;
		PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
		clear_buffer_journal_dirty(bh);
	}

	if (journal->j_len > journal->j_len_alloc) {
		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
	}

	set_buffer_journaled(bh);

	/* now put this guy on the end */
	if (!cn) {
		cn = get_cnode(p_s_sb);
		if (!cn) {
			reiserfs_panic(p_s_sb, "get_cnode failed!\n");
		}

		if (th->t_blocks_logged == th->t_blocks_allocated) {
			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
		}
		th->t_blocks_logged++;
		journal->j_len++;

		cn->bh = bh;
		cn->blocknr = bh->b_blocknr;
		cn->sb = p_s_sb;
		cn->jlist = NULL;
		insert_journal_hash(journal->j_hash_table, cn);
		if (!count_already_incd) {
			get_bh(bh);
		}
	}
	cn->prev = journal->j_last;
	cn->next = NULL;
	if (journal->j_last) {
		journal->j_last->next = cn;
		journal->j_last = cn;
	} else {
		journal->j_first = cn;
		journal->j_last = cn;
	}
	return 0;
}
int journal_end(struct reiserfs_transaction_handle *th,
		struct super_block *p_s_sb, unsigned long nblocks)
{
	if (!current->journal_info && th->t_refcount > 1)
		reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
				 th->t_refcount);

	if (!th->t_trans_id) {
		WARN_ON(1);
		return -EIO;
	}

	th->t_refcount--;
	if (th->t_refcount > 0) {
		struct reiserfs_transaction_handle *cur_th =
		    current->journal_info;

		/* we aren't allowed to close a nested transaction on a different
		 ** filesystem from the one in the task struct
		 */
		if (cur_th->t_super != th->t_super)
			BUG();

		if (th != cur_th) {
			memcpy(current->journal_info, th, sizeof(*th));
			th->t_trans_id = 0;
		}
		return 0;
	} else {
		return do_journal_end(th, p_s_sb, nblocks, 0);
	}
}
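
/*
 * Illustrative sketch (not part of the original source): how a filesystem
 * operation typically brackets a metadata update.  Every logged buffer is
 * first prepared (locked against writeback), then marked dirty inside the
 * handle; the actual commit may be batched together with other transactions.
 * The example logs the superblock buffer, mirroring what
 * reiserfs_flush_old_commits() does below.
 */
#if 0
static int example_touch_super(struct super_block *sb)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);	/* reserve one log block */
	if (err)
		return err;
	reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
	journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
	return journal_end(&th, sb, 1);
}
#endif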
/* removes from the current transaction, releasing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and released the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *p_s_sb,
				   b_blocknr_t blocknr, int already_cleaned)
{
	struct buffer_head *bh;
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	int ret = 0;

	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
	if (!cn || !cn->bh) {
		return ret;
	}
	bh = cn->bh;
	if (cn->prev) {
		cn->prev->next = cn->next;
	}
	if (cn->next) {
		cn->next->prev = cn->prev;
	}
	if (cn == journal->j_first) {
		journal->j_first = cn->next;
	}
	if (cn == journal->j_last) {
		journal->j_last = cn->prev;
	}
	if (bh)
		remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
				    bh->b_blocknr, 0);
	clear_buffer_journaled(bh);	/* don't log this one */

	if (!already_cleaned) {
		clear_buffer_journal_dirty(bh);
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
		put_bh(bh);
		if (atomic_read(&(bh->b_count)) < 0) {
			reiserfs_warning(p_s_sb,
					 "journal-1752: remove from trans, b_count < 0");
		}
		ret = 1;
	}
	journal->j_len--;
	journal->j_len_alloc--;
	free_cnode(p_s_sb, cn);
	return ret;
}
/*
** for any cnode in a journal list, it can only be dirtied if all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;
	struct reiserfs_journal_cnode *cur = cn->hprev;
	int can_dirty = 1;

	/* first test hprev.  These are all newer than cn, so any node here
	 ** with the same block number and dev means this node can't be sent
	 ** to disk right now.
	 */
	while (cur && can_dirty) {
		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
		    cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hprev;
	}
	/* then test hnext.  These are all older than cn.  As long as they
	 ** are committed to the log, it is safe to write cn to disk
	 */
	cur = cn->hnext;
	while (cur && can_dirty) {
		if (cur->jlist && cur->jlist->j_len > 0 &&
		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hnext;
	}
	return can_dirty;
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *p_s_sb, unsigned long nblocks)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	BUG_ON(!th->t_trans_id);
	/* you can sync while nested, very, very bad */
	if (th->t_refcount > 1) {
		BUG();
	}
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
					     1);
		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
	}
	return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
}
/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(void *p)
{
	struct super_block *p_s_sb = p;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	lock_kernel();
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(p_s_sb, jl, 1);
	}
	unlock_kernel();
	/*
	 * this is a little racy, but there's no harm in missing
	 * the filemap_fdata_write
	 */
	if (!atomic_read(&journal->j_async_throttle)
	    && !reiserfs_is_journal_aborted(journal)) {
		atomic_inc(&journal->j_async_throttle);
		filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
		atomic_dec(&journal->j_async_throttle);
	}
}
/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *p_s_sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	now = get_seconds();
	/* safety check so we don't flush while we are replaying the log during
	 * mount
	 */
	if (list_empty(&journal->j_journal_list)) {
		return 0;
	}

	/* check the current transaction.  If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 &&
	    journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, p_s_sb, 1)) {
			reiserfs_prepare_for_journal(p_s_sb,
						     SB_BUFFER_WITH_SB(p_s_sb),
						     1);
			journal_mark_dirty(&th, p_s_sb,
					   SB_BUFFER_WITH_SB(p_s_sb));

			/* we're only being called from kreiserfsd, it makes no sense to do
			 ** an async commit so that kreiserfsd can do it later
			 */
			do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
		}
	}
	return p_s_sb->s_dirt;
}
/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done.  By the time it wakes up, the transaction it was called with has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
			     struct super_block *p_s_sb, unsigned long nblocks,
			     int flags)
{
	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super,
			       "journal-1577: handle trans id %ld != current trans id %ld\n",
			       th->t_trans_id, journal->j_trans_id);
	}

	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
		atomic_dec(&(journal->j_wcount));
	}

	/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
	 ** will be dealt with by next transaction that actually writes something, but should be taken
	 ** care of in this trans
	 */
	if (journal->j_len == 0) {
		BUG();
	}
	/* if wcount > 0, and we are called with flush or commit_now,
	 ** we wait on j_join_wait.  We will wake up when the last writer has
	 ** finished the transaction, and started it on its way to the disk.
	 ** Then, we flush the commit or journal list, and just return 0
	 ** because the rest of journal end was already done for this transaction.
	 */
	if (atomic_read(&(journal->j_wcount)) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&(journal->j_jlock), 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}
			unlock_journal(p_s_sb);

			/* sleep while the current transaction is still j_jlocked */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(p_s_sb);
				} else {
					lock_journal(p_s_sb);
					if (journal->j_trans_id == trans_id) {
						atomic_set(&(journal->j_jlock),
							   1);
					}
					unlock_journal(p_s_sb);
				}
			}
			if (journal->j_trans_id == trans_id) {
				BUG();
			}
			if (commit_now
			    && journal_list_still_alive(p_s_sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(p_s_sb, jl, 1);
			}
			return 0;
		}
		unlock_journal(p_s_sb);
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		journal->j_bcount++;
		unlock_journal(p_s_sb);
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
		reiserfs_panic(p_s_sb,
			       "journal-003: journal_end: j_start (%ld) is too high\n",
			       journal->j_start);
	}
	return 1;
}
/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th,
		       struct super_block *p_s_sb, b_blocknr_t blocknr)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn = NULL;
	struct buffer_head *bh = NULL;
	struct reiserfs_list_bitmap *jb = NULL;
	int cleaned = 0;
	BUG_ON(!th->t_trans_id);

	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
	if (cn && cn->bh) {
		bh = cn->bh;
		get_bh(bh);
	}
	/* if it is journal new, we just remove it from this transaction */
	if (bh && buffer_journal_new(bh)) {
		clear_buffer_journal_new(bh);
		clear_prepared_bits(bh);
		reiserfs_clean_and_file_buffer(bh);
		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
	} else {
		/* set the bit for this block in the journal bitmap for this transaction */
		jb = journal->j_current_jl->j_list_bitmap;
		if (!jb) {
			reiserfs_panic(p_s_sb,
				       "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
		}
		set_bit_in_list_bitmap(p_s_sb, blocknr, jb);

		/* Note, the entire while loop is not allowed to schedule.  */

		if (bh) {
			clear_prepared_bits(bh);
			reiserfs_clean_and_file_buffer(bh);
		}
		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);

		/* find all older transactions with this block, make sure they don't try to write it out */
		cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
					  blocknr);
		while (cn) {
			if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
				set_bit(BLOCK_FREED, &cn->state);
				if (cn->bh) {
					if (!cleaned) {
						/* remove_from_transaction will brelse the buffer if it was
						 ** in the current trans
						 */
						clear_buffer_journal_dirty(cn->
									   bh);
						clear_buffer_dirty(cn->bh);
						clear_buffer_journal_test(cn->
									  bh);
						cleaned = 1;
						put_bh(cn->bh);
						if (atomic_read
						    (&(cn->bh->b_count)) < 0) {
							reiserfs_warning(p_s_sb,
									 "journal-2138: cn->bh->b_count < 0");
						}
					}
					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
						atomic_dec(&
							   (cn->jlist->
							    j_nonzerolen));
					}
					cn->bh = NULL;
				}
			}
			cn = cn->hnext;
		}
	}

	if (bh) {
		put_bh(bh);	/* get_hash grabs the buffer */
		if (atomic_read(&(bh->b_count)) < 0) {
			reiserfs_warning(p_s_sb,
					 "journal-2165: bh->b_count < 0");
		}
	}
	return 0;
}
void reiserfs_update_inode_transaction(struct inode *inode)
{
	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
	REISERFS_I(inode)->i_jl = journal->j_current_jl;
	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
			     struct reiserfs_journal_list *jl)
{
	struct reiserfs_transaction_handle th;
	struct super_block *sb = inode->i_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	/* is it from the current transaction, or from an unknown transaction? */
	if (id == journal->j_trans_id) {
		jl = journal->j_current_jl;
		/* try to let other writers come in and grow this transaction */
		let_transaction_grow(sb, id);
		if (journal->j_trans_id != id) {
			goto flush_commit_only;
		}

		ret = journal_begin(&th, sb, 1);
		if (ret)
			return ret;

		/* someone might have ended this transaction while we joined */
		if (journal->j_trans_id != id) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
			ret = journal_end(&th, sb, 1);
			goto flush_commit_only;
		}

		ret = journal_end_sync(&th, sb, 1);
		if (!ret)
			ret = 1;

	} else {
		/* this gets tricky, we have to make sure the journal list in
		 * the inode still exists.  We know the list is still around
		 * if we've got a larger transaction id than the oldest list
		 */
	      flush_commit_only:
		if (journal_list_still_alive(inode->i_sb, id)) {
			/*
			 * we only set ret to 1 when we know for sure
			 * the barrier hasn't been started yet on the commit
			 * block.
			 */
			if (atomic_read(&jl->j_commit_left) > 1)
				ret = 1;
			flush_commit_list(sb, jl, 1);
			if (journal->j_errno)
				ret = journal->j_errno;
		}
	}
	/* otherwise the list is gone, and long since committed */
	return ret;
}
int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned long id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/* for the whole inode, assume unset id means it was
	 * changed in the current transaction.  More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}
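
/*
 * Illustrative sketch (not part of the original source): a typical caller of
 * reiserfs_commit_for_inode() is an fsync-style path that must have the
 * transaction containing an inode's last change committed (and its barrier
 * issued) before reporting success to userspace.
 */
#if 0
static int example_sync_inode_metadata(struct inode *inode)
{
	int ret = reiserfs_commit_for_inode(inode);

	/* ret < 0 is an error; 0 or 1 means the relevant commit is on disk */
	return ret < 0 ? ret : 0;
}
#endif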
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	PROC_INFO_INC(p_s_sb, journal.restore_prepared);
	if (!bh) {
		return;
	}
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		cn = get_journal_hash_dev(p_s_sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
	}
	clear_buffer_journal_prepared(bh);
}
extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(p_s_sb, journal.prepare);

	if (test_set_buffer_locked(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}
static void flush_old_journal_lists(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;
	time_t now = get_seconds();

	while (!list_empty(&journal->j_journal_list)) {
		entry = journal->j_journal_list.next;
		jl = JOURNAL_LIST_ENTRY(entry);
		/* this check should always be run, to send old lists to disk */
		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
			flush_used_journal_lists(s, jl);
		} else {
			break;
		}
	}
}
/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *p_s_sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush;
	int wait_on_commit;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned long commit_trans_id;
	int trans_half;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);

	/* protect flush_older_commits from doing mistakes if the
	   transaction ID counter gets overflowed.  */
	if (th->t_trans_id == ~0UL)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(p_s_sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
					     1);
		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
	}

	lock_journal(p_s_sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does not return 1
	 ** it tells us if we should continue with the journal_end, or just return
	 */
	if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
		p_s_sb->s_dirt = 1;
		wake_queued_writers(p_s_sb);
		reiserfs_async_progress_wait(p_s_sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}

	/*
	 ** j must wait means we have to flush the log blocks, and the real blocks for
	 ** this transaction
	 */
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for them
	 * and raise the refcount so that it is > 0. */
	current->journal_info = th;
	th->t_refcount++;
	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
						 * the transaction */
	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif

	/* setup description block */
	d_bh =
	    journal_getblk(p_s_sb,
			   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			   journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
	c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	down(&jl->j_commit_lock);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;

	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
	 **  for each real block, add it to the journal list hash,
	 ** copy into real block index array in the commit or desc block
	 */
	trans_half = journal_trans_half(p_s_sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(p_s_sb);
			if (!jl_cn) {
				reiserfs_panic(p_s_sb,
					       "journal-1676, get_cnode returned NULL\n");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */

			if (is_block_in_log_or_reserved_area
			    (p_s_sb, cn->bh->b_blocknr)) {
				reiserfs_panic(p_s_sb,
					       "journal-2332: Trying to log block %lu, which is a log block\n",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = p_s_sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal were marked for not logging */
	if (journal->j_len == 0) {
		BUG();
	}

	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too.  Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;		/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area.  dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh =
			    journal_getblk(p_s_sb,
					   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
					   ((cur_write_start +
					     jindex) %
					    SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction.  don't log this one */
			reiserfs_warning(p_s_sb,
					 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(p_s_sb, cn);
		cn = next;
		cond_resched();
	}

	/* we are done with both the c_bh and d_bh, but
	 ** c_bh must be written after all other commit blocks,
	 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
	 */

	journal->j_current_jl = alloc_journal_list(p_s_sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(p_s_sb);

	// make sure reiserfs_add_jh sees the new current_jl before we
	// write out the tails
	smp_mb();

	/* tail conversion targets have to hit the disk before we end the
	 * transaction.  Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed and
	 * clean, if we crash before the later transaction commits, the data block
	 * is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		unlock_kernel();
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		lock_kernel();
	}
	if (!list_empty(&jl->j_tail_bh_list))
		BUG();
	up(&jl->j_commit_lock);

	/* honor the flush wishes from the caller, simple commits can
	 ** be done outside the journal lock, they are done below
	 **
	 ** if we don't flush the commit list right now, we put it into
	 ** the work queue so the people waiting on the async progress work
	 ** queue don't wait for this proc to flush journal lists and such.
	 */
	if (flush) {
		flush_commit_list(p_s_sb, jl, 1);
		flush_journal_list(p_s_sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

	/* if the next transaction has any chance of wrapping, flush
	 ** transactions that might get overwritten.  If any journal lists are very
	 ** old flush them as well.
	 */
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
				/* if we don't cross into the next transaction and we don't
				 * wrap, there is no way we can overlap any later transactions
				 * break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from our start to the end of the
				 * log, and our wrapped portion doesn't overlap anything at
				 * the start of the log.  We can break
				 */
				break;
			}
		}
	}
	flush_old_journal_lists(p_s_sb);

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(p_s_sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(p_s_sb,
			       "journal-1996: do_journal_end, could not get a list bitmap\n");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(p_s_sb);
	/* wake up anybody waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(p_s_sb, commit_trans_id)) {
		flush_commit_list(p_s_sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(p_s_sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = p_s_sb;

	return journal->j_errno;
}
static void __reiserfs_journal_abort_hard(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
	       reiserfs_bdevname(sb));

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
}
static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	__reiserfs_journal_abort_hard(sb);
}

void reiserfs_journal_abort(struct super_block *sb, int errno)
{
	return __reiserfs_journal_abort_soft(sb, errno);
}