/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                   otherwise, it could do an async/synchronous commit, or
**                   a full flush of all log and real blocks in the
**                   transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
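/*
** A minimal usage sketch of the interface described above (illustrative
** only; the call sequence mirrors do_journal_release() later in this file,
** and the block count of 10 is an arbitrary example):
**
**   struct reiserfs_transaction_handle th ;
**   journal_begin(&th, sb, 10) ;
**   reiserfs_prepare_for_journal(sb, bh, 1) ;
**   ... modify bh ...
**   journal_mark_dirty(&th, sb, bh) ;
**   journal_end(&th, sb, 10) ;
*/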
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))
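/*
** Illustrative sketch (assumed pattern, mirroring flush_older_journal_lists()
** below) of how these accessors recover the containing journal list from a
** bare list_head while walking the journal's list of transactions:
**
**   struct list_head *entry = journal->j_journal_list.next ;
**   while (entry != &journal->j_journal_list) {
**     struct reiserfs_journal_list *jl = JOURNAL_LIST_ENTRY(entry) ;
**     ... inspect jl->j_trans_id ...
**     entry = entry->next ;
**   }
*/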
/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit
                                     structs at 4k */
#define BUFNR 64 /*read ahead */
/* cnode stat bits.  Move these into reiserfs_fs.h */
#define BLOCK_FREED 2    /* this block was freed, and can't be written.  */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4 /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1 /* flush commit and real blocks */
#define COMMIT_NOW  2 /* end and commit this transaction */
#define WAIT        4 /* wait for the log blocks to hit the disk */
static int do_journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long nblocks, int flags) ;
static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int can_dirty(struct reiserfs_journal_cnode *cn) ;
static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
    JBEGIN_REG = 0,   /* regular journal begin */
    JBEGIN_JOIN = 1,  /* join the running transaction if at all possible */
    JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb,
                              unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
}
/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
  if (bh) {
    clear_buffer_dirty(bh);
    clear_buffer_journal_test(bh);
  }
  return 0 ;
}
static void disable_barrier(struct super_block *s)
{
  REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
  printk("reiserfs: disabling flush barriers on %s\n", reiserfs_bdevname(s));
}
static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_bitmap_node *bn ;
  static int id;

  bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
  if (!bn) {
    return NULL ;
  }
  bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
  if (!bn->data) {
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    return NULL ;
  }
  bn->id = id++ ;
  memset(bn->data, 0, p_s_sb->s_blocksize) ;
  INIT_LIST_HEAD(&bn->list) ;
  return bn ;
}
static struct reiserfs_bitmap_node *
get_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  struct reiserfs_bitmap_node *bn = NULL;
  struct list_head *entry = journal->j_bitmap_nodes.next ;

  journal->j_used_bitmap_nodes++ ;
repeat:

  if (entry != &journal->j_bitmap_nodes) {
    bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
    list_del(entry) ;
    memset(bn->data, 0, p_s_sb->s_blocksize) ;
    journal->j_free_bitmap_nodes-- ;
    return bn ;
  }
  bn = allocate_bitmap_node(p_s_sb) ;
  if (!bn) {
    yield();
    goto repeat ;
  }
  return bn ;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  journal->j_used_bitmap_nodes-- ;
  if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  } else {
    list_add(&bn->list, &journal->j_bitmap_nodes) ;
    journal->j_free_bitmap_nodes++ ;
  }
}
static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
  int i ;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  struct reiserfs_bitmap_node *bn = NULL ;
  for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
    bn = allocate_bitmap_node(p_s_sb) ;
    if (bn) {
      list_add(&bn->list, &journal->j_bitmap_nodes) ;
      journal->j_free_bitmap_nodes++ ;
    } else {
      break ; // this is ok, we'll try again when more are needed
    }
  }
}
static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
                                  struct reiserfs_list_bitmap *jb) {
  int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
  int bit_nr = block % (p_s_sb->s_blocksize << 3) ;

  if (!jb->bitmaps[bmap_nr]) {
    jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
  }
  set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
  return 0 ;
}
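/*
** Worked example of the index math above (hypothetical numbers): with a
** 4k blocksize, each bitmap node covers s_blocksize << 3 == 32768 blocks,
** so block 70000 lands in bmap_nr = 70000 / 32768 == 2 and
** bit_nr = 70000 % 32768 == 4464.
*/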
static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb) {
  int i ;
  if (jb->bitmaps == NULL)
    return;

  for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
    if (jb->bitmaps[i]) {
      free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
      jb->bitmaps[i] = NULL ;
    }
  }
}
/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array) {
  int i ;
  struct reiserfs_list_bitmap *jb ;
  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    cleanup_bitmap_list(p_s_sb, jb) ;
    vfree(jb->bitmaps) ;
    jb->bitmaps = NULL ;
  }
  return 0;
}
static int free_bitmap_nodes(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  struct list_head *next = journal->j_bitmap_nodes.next ;
  struct reiserfs_bitmap_node *bn ;

  while (next != &journal->j_bitmap_nodes) {
    bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
    list_del(next) ;
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    next = journal->j_bitmap_nodes.next ;
    journal->j_free_bitmap_nodes-- ;
  }

  return 0 ;
}
/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   int bmap_nr) {
  int i ;
  int failed = 0 ;
  struct reiserfs_list_bitmap *jb ;
  int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;

  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    jb->bitmaps = vmalloc( mem ) ;
    if (!jb->bitmaps) {
      reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists") ;
      failed = 1 ;
      break ;
    }
    memset(jb->bitmaps, 0, mem) ;
  }
  if (failed) {
    free_list_bitmaps(p_s_sb, jb_array) ;
    return -1 ;
  }
  return 0 ;
}
/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  int i, j ;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  struct reiserfs_list_bitmap *jb = NULL ;

  for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
    i = journal->j_list_bitmap_index ;
    journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
    jb = journal->j_list_bitmap + i ;
    if (journal->j_list_bitmap[i].journal_list) {
      flush_commit_list(p_s_sb, journal->j_list_bitmap[i].journal_list, 1) ;
      if (!journal->j_list_bitmap[i].journal_list) {
        break ;
      }
    } else {
      break ;
    }
  }
  if (jb->journal_list) { /* double check to make sure if flushed correctly */
    return NULL ;
  }
  jb->journal_list = jl ;
  return jb ;
}
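/*
** Example of the round-robin scan above (illustrative): with, say,
** JOURNAL_NUM_BITMAPS == 3 the index walks 0, 1, 2, 0, 1, ... so each
** list bitmap is visited (and, if still owned by a journal list, that
** list's commit flushed) up to three times before we give up and
** return NULL.
*/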
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
  struct reiserfs_journal_cnode *head ;
  int i ;
  if (num_cnodes <= 0) {
    return NULL ;
  }
  head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  if (!head) {
    return NULL ;
  }
  memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  head[0].prev = NULL ;
  head[0].next = head + 1 ;
  for (i = 1 ; i < num_cnodes ; i++) {
    head[i].prev = head + (i - 1) ;
    head[i].next = head + (i + 1) ; /* if last one, overwrite it after the if */
  }
  head[num_cnodes - 1].next = NULL ;
  return head ;
}
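/*
** Sketch of the structure allocate_cnodes(4) builds (illustrative):
**
**   head[0] <-> head[1] <-> head[2] <-> head[3]
**   head[0].prev == NULL, head[3].next == NULL
**
** get_cnode() and free_cnode() below pop and push at the head of this
** free list.
*/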
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

  reiserfs_check_lock_depth(p_s_sb, "get_cnode") ;

  if (journal->j_cnode_free <= 0) {
    return NULL ;
  }
  journal->j_cnode_used++ ;
  journal->j_cnode_free-- ;
  cn = journal->j_cnode_free_list ;
  if (!cn) {
    return cn ;
  }
  if (cn->next) {
    cn->next->prev = NULL ;
  }
  journal->j_cnode_free_list = cn->next ;
  memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
  return cn ;
}
/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

  reiserfs_check_lock_depth(p_s_sb, "free_cnode") ;

  journal->j_cnode_used-- ;
  journal->j_cnode_free++ ;
  /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
  cn->next = journal->j_cnode_free_list ;
  if (journal->j_cnode_free_list) {
    journal->j_cnode_free_list->prev = cn ;
  }
  cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
  journal->j_cnode_free_list = cn ;
}
static void clear_prepared_bits(struct buffer_head *bh) {
  clear_buffer_journal_prepared (bh);
  clear_buffer_journal_restore_dirty (bh);
}
/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller) {
#ifdef CONFIG_SMP
  if (current->lock_depth < 0) {
    reiserfs_panic (sb, "%s called without kernel lock held", caller) ;
  }
#else
  ;
#endif
}
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
                     struct reiserfs_journal_cnode **table,
                     long bl)
{
  struct reiserfs_journal_cnode *cn ;
  cn = journal_hash(table, sb, bl) ;
  while (cn) {
    if (cn->blocknr == bl && cn->sb == sb)
      return cn ;
    cn = cn->hnext ;
  }
  return (struct reiserfs_journal_cnode *)0 ;
}
/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_list_bitmap *jb ;
  int i ;
  unsigned long bl ;

  *next_zero_bit = 0 ; /* always start this at zero. */

  PROC_INFO_INC( p_s_sb, journal.in_journal );
  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
  ** if we crash before the transaction that freed it commits,  this transaction won't
  ** have committed either, and the block will never be written
  */
  if (search_all) {
    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
      PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
      jb = journal->j_list_bitmap + i ;
      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
          test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
        *next_zero_bit = find_next_zero_bit((unsigned long *)
                                            (jb->bitmaps[bmap_nr]->data),
                                            p_s_sb->s_blocksize << 3, bit_nr+1) ;
        return 1 ;
      }
    }
  }

  bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
  /* is it in any old transactions? */
  if (search_all && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
    return 1;
  }

  /* is it in the current transaction.  This should never happen */
  if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
    BUG();
    return 1;
  }

  PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
  /* safe for reuse */
  return 0 ;
}
/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal_cnode *cn_orig ;

  cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
  cn->hnext = cn_orig ;
  cn->hprev = NULL ;
  if (cn_orig) {
    cn_orig->hprev = cn ;
  }
  journal_hash(table, cn->sb, cn->blocknr) = cn ;
}
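/*
** Hedged usage sketch (the pattern journal_mark_dirty follows; cn here is a
** hypothetical cnode freshly taken from get_cnode()): link a block into the
** current transaction's hash, then find it again by block number:
**
**   cn->sb = p_s_sb ;
**   cn->blocknr = bh->b_blocknr ;
**   insert_journal_hash(journal->j_hash_table, cn) ;
**   ...
**   cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bh->b_blocknr) ;
*/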
/* lock the current transaction */
inline static void lock_journal(struct super_block *p_s_sb) {
  PROC_INFO_INC( p_s_sb, journal.lock_journal );
  down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
inline static void unlock_journal(struct super_block *p_s_sb) {
  up(&SB_JOURNAL(p_s_sb)->j_lock);
}
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
    jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
{
    if (jl->j_refcount < 1) {
        reiserfs_panic (s, "trans id %lu, refcount at %d", jl->j_trans_id,
                        jl->j_refcount);
    }
    if (--jl->j_refcount == 0)
        reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}
/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {

  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
  if (jb) {
    cleanup_bitmap_list(p_s_sb, jb) ;
  }
  jl->j_list_bitmap->journal_list = NULL ;
  jl->j_list_bitmap = NULL ;
}
static int journal_list_still_alive(struct super_block *s,
                                    unsigned long trans_id)
{
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    struct list_head *entry = &journal->j_journal_list;
    struct reiserfs_journal_list *jl;

    if (!list_empty(entry)) {
        jl = JOURNAL_LIST_ENTRY(entry->next);
        if (jl->j_trans_id <= trans_id) {
            return 1;
        }
    }
    return 0;
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
    char b[BDEVNAME_SIZE];

    if (buffer_journaled(bh)) {
        reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk",
                         bh->b_blocknr, bdevname(bh->b_bdev, b)) ;
    }
    if (uptodate)
        set_buffer_uptodate(bh) ;
    else
        clear_buffer_uptodate(bh) ;
    unlock_buffer(bh) ;
    put_bh(bh) ;
}
static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) {
    if (uptodate)
        set_buffer_uptodate(bh) ;
    else
        clear_buffer_uptodate(bh) ;
    unlock_buffer(bh) ;
    put_bh(bh) ;
}
static void submit_logged_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_buffer_io_sync ;
    clear_buffer_journal_new (bh);
    clear_buffer_dirty(bh) ;
    if (!test_clear_buffer_journal_test (bh))
        BUG();
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh) ;
}
static void submit_ordered_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh) ;
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh) ;
}
static int submit_barrier_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh) ;
    if (!buffer_uptodate(bh))
        BUG();
    return submit_bh(WRITE_BARRIER, bh) ;
}
static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh) {
    if (buffer_eopnotsupp(bh)) {
        clear_buffer_eopnotsupp(bh);
        disable_barrier(s);
        set_buffer_uptodate(bh);
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
    }
}
#define CHUNK_SIZE 32
struct buffer_chunk {
    struct buffer_head *bh[CHUNK_SIZE];
    int nr;
};

static void write_chunk(struct buffer_chunk *chunk) {
    int i;
    for (i = 0; i < chunk->nr ; i++) {
        submit_logged_buffer(chunk->bh[i]) ;
    }
    chunk->nr = 0;
}

static void write_ordered_chunk(struct buffer_chunk *chunk) {
    int i;
    for (i = 0; i < chunk->nr ; i++) {
        submit_ordered_buffer(chunk->bh[i]) ;
    }
    chunk->nr = 0;
}
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock,
                        void (fn)(struct buffer_chunk *))
{
    int ret = 0;
    if (chunk->nr >= CHUNK_SIZE)
        BUG();
    chunk->bh[chunk->nr++] = bh;
    if (chunk->nr >= CHUNK_SIZE) {
        ret = 1;
        if (lock)
            spin_unlock(lock);
        fn(chunk);
        if (lock)
            spin_lock(lock);
    }
    return ret;
}
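/*
** Typical use of the chunk helpers (sketch; this is the pattern
** write_one_transaction() below follows): batch up to CHUNK_SIZE buffers
** and let add_to_chunk() submit them when the array fills, then flush the
** remainder by hand:
**
**   struct buffer_chunk chunk ;
**   chunk.nr = 0 ;
**   ... for each buffer to write ...
**       add_to_chunk(&chunk, bh, NULL, write_chunk) ;
**   ...
**   if (chunk.nr)
**       write_chunk(&chunk) ;
*/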
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void) {
    struct reiserfs_jh *jh;
    while (1) {
        jh = kmalloc(sizeof(*jh), GFP_NOFS);
        if (jh) {
            atomic_inc(&nr_reiserfs_jh);
            return jh;
        }
        yield();
    }
}
/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh) {
    struct reiserfs_jh *jh;

    jh = bh->b_private;
    if (jh) {
        bh->b_private = NULL;
        jh->bh = NULL;
        list_del_init(&jh->list);
        kfree(jh);
        if (atomic_read(&nr_reiserfs_jh) <= 0)
            BUG();
        atomic_dec(&nr_reiserfs_jh);
        put_bh(bh);
    }
}
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
{
    struct reiserfs_jh *jh;

    if (bh->b_private) {
        spin_lock(&j->j_dirty_buffers_lock);
        if (!bh->b_private) {
            spin_unlock(&j->j_dirty_buffers_lock);
            goto no_jh;
        }
        jh = bh->b_private;
        list_del_init(&jh->list);
    } else {
no_jh:
        get_bh(bh);
        jh = alloc_jh();
        spin_lock(&j->j_dirty_buffers_lock);
        /* buffer must be locked for __add_jh, should be able to have
         * two adds at the same time
         */
        if (bh->b_private)
            BUG();
        jh->bh = bh;
        bh->b_private = jh;
    }
    jh->jl = j->j_current_jl;
    if (tail)
        list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
    else {
        list_add_tail(&jh->list, &jh->jl->j_bh_list);
    }
    spin_unlock(&j->j_dirty_buffers_lock);
    return 0;
}
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
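/*
** Sketch of intended use (assumed, based on the callers of these helpers):
** when a data buffer must reach disk before the transaction commits, the
** write path does
**
**   reiserfs_add_ordered_list(inode, bh) ;
**
** and flush_commit_list() below drains jl->j_bh_list through
** write_ordered_buffers() before the commit block is written.  Tail
** conversions use reiserfs_add_tail_list(), which files the buffer on
** jl->j_tail_bh_list instead.
*/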
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
{
    struct buffer_head *bh;
    struct reiserfs_jh *jh;
    int ret = j->j_errno;
    struct buffer_chunk chunk;
    struct list_head tmp;
    INIT_LIST_HEAD(&tmp);

    chunk.nr = 0;
    spin_lock(lock);
    while (!list_empty(list)) {
        jh = JH_ENTRY(list->next);
        bh = jh->bh;
        get_bh(bh);
        if (test_set_buffer_locked(bh)) {
            if (!buffer_dirty(bh)) {
                list_del_init(&jh->list);
                list_add(&jh->list, &tmp);
                goto loop_next;
            }
            spin_unlock(lock);
            if (chunk.nr)
                write_ordered_chunk(&chunk);
            wait_on_buffer(bh);
            cond_resched();
            spin_lock(lock);
            goto loop_next;
        }
        if (buffer_dirty(bh)) {
            list_del_init(&jh->list);
            list_add(&jh->list, &tmp);
            add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
        } else {
            reiserfs_free_jh(bh);
            unlock_buffer(bh);
        }
loop_next:
        put_bh(bh);
        cond_resched_lock(lock);
    }
    if (chunk.nr) {
        spin_unlock(lock);
        write_ordered_chunk(&chunk);
        spin_lock(lock);
    }
    while (!list_empty(&tmp)) {
        jh = JH_ENTRY(tmp.prev);
        bh = jh->bh;
        get_bh(bh);
        reiserfs_free_jh(bh);

        if (buffer_locked(bh)) {
            spin_unlock(lock);
            wait_on_buffer(bh);
            spin_lock(lock);
        }
        if (!buffer_uptodate(bh)) {
            ret = -EIO;
        }
        put_bh(bh);
        cond_resched_lock(lock);
    }
    spin_unlock(lock);
    return ret;
}
static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal_list *first_jl;
    struct list_head *entry;
    unsigned long trans_id = jl->j_trans_id;
    unsigned long other_trans_id;
    unsigned long first_trans_id;

find_first:
    /*
     * first we walk backwards to find the oldest uncommitted transaction
     */
    first_jl = jl;
    entry = jl->j_list.prev;
    while (1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        if (entry == &journal->j_journal_list ||
            atomic_read(&other_jl->j_older_commits_done))
            break;

        first_jl = other_jl;
        entry = other_jl->j_list.prev;
    }

    /* if we didn't find any older uncommitted transactions, return now */
    if (first_jl == jl) {
        return 0;
    }

    first_trans_id = first_jl->j_trans_id;

    entry = &first_jl->j_list;
    while (1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        other_trans_id = other_jl->j_trans_id;

        if (other_trans_id < trans_id) {
            if (atomic_read(&other_jl->j_commit_left) != 0) {
                flush_commit_list(s, other_jl, 0);

                /* list we were called with is gone, return */
                if (!journal_list_still_alive(s, trans_id))
                    return 1;

                /* the one we just flushed is gone, this means all
                 * older lists are also gone, so first_jl is no longer
                 * valid either.  Go back to the beginning.
                 */
                if (!journal_list_still_alive(s, other_trans_id)) {
                    goto find_first;
                }
            }
            entry = entry->next;
            if (entry == &journal->j_journal_list)
                return 0;
        } else {
            return 0;
        }
    }
    return 0;
}
int reiserfs_async_progress_wait(struct super_block *s) {
    DEFINE_WAIT(wait);
    struct reiserfs_journal *j = SB_JOURNAL(s);
    if (atomic_read(&j->j_async_throttle))
        blk_congestion_wait(WRITE, HZ/10);
    return 0;
}
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
  int i;
  int bn ;
  struct buffer_head *tbh = NULL ;
  unsigned long trans_id = jl->j_trans_id;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int barrier = 0;
  int retval = 0;

  reiserfs_check_lock_depth(s, "flush_commit_list") ;

  if (atomic_read(&jl->j_older_commits_done)) {
    return 0 ;
  }

  /* before we can put our commit blocks on disk, we have to make sure everyone older than
  ** us is on disk too
  */
  BUG_ON (jl->j_len <= 0);
  BUG_ON (trans_id == journal->j_trans_id);

  get_journal_list(jl);
  if (flushall) {
    if (flush_older_commits(s, jl) == 1) {
      /* list disappeared during flush_older_commits.  return */
      goto put_jl;
    }
  }

  /* make sure nobody is trying to flush this one at the same time */
  down(&jl->j_commit_lock);
  if (!journal_list_still_alive(s, trans_id)) {
    up(&jl->j_commit_lock);
    goto put_jl;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* this commit is done, exit */
  if (atomic_read(&(jl->j_commit_left)) <= 0) {
    if (flushall) {
      atomic_set(&(jl->j_older_commits_done), 1) ;
    }
    up(&jl->j_commit_lock);
    goto put_jl;
  }

  if (!list_empty(&jl->j_bh_list)) {
      unlock_kernel();
      write_ordered_buffers(&journal->j_dirty_buffers_lock,
                            journal, jl, &jl->j_bh_list);
      lock_kernel();
  }
  BUG_ON (!list_empty(&jl->j_bh_list));
  /*
   * for the description block and all the log blocks, submit any buffers
   * that haven't already reached the disk
   */
  atomic_inc(&journal->j_async_throttle);
  for (i = 0 ; i < (jl->j_len + 1) ; i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) %
         SB_ONDISK_JOURNAL_SIZE(s);
    tbh = journal_find_get_block(s, bn) ;
    if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
        ll_rw_block(WRITE, 1, &tbh) ;
    put_bh(tbh) ;
  }
  atomic_dec(&journal->j_async_throttle);

  /* wait on everything written so far before writing the commit
   * if we are in barrier mode, send the commit down now
   */
  barrier = reiserfs_barrier_flush(s);
  if (barrier) {
      int ret;
      lock_buffer(jl->j_commit_bh);
      ret = submit_barrier_buffer(jl->j_commit_bh);
      if (ret == -EOPNOTSUPP) {
          set_buffer_uptodate(jl->j_commit_bh);
          disable_barrier(s);
          barrier = 0;
      }
  }
  for (i = 0 ;  i < (jl->j_len + 1) ; i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
         (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
    tbh = journal_find_get_block(s, bn) ;
    wait_on_buffer(tbh) ;
    // since we're using ll_rw_blk above, it might have skipped over
    // a locked buffer.  Double check here
    //
    if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
      sync_dirty_buffer(tbh);
    if (unlikely (!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
      reiserfs_warning(s, "journal-601, buffer write failed") ;
#endif
      retval = -EIO;
    }
    put_bh(tbh) ; /* once for journal_find_get_block */
    put_bh(tbh) ; /* once due to original getblk in do_journal_end */
    atomic_dec(&(jl->j_commit_left)) ;
  }

  BUG_ON (atomic_read(&(jl->j_commit_left)) != 1);

  if (!barrier) {
      if (buffer_dirty(jl->j_commit_bh))
        BUG();
      mark_buffer_dirty(jl->j_commit_bh) ;
      sync_dirty_buffer(jl->j_commit_bh) ;
  } else
      wait_on_buffer(jl->j_commit_bh);

  check_barrier_completion(s, jl->j_commit_bh);

  /* If there was a write error in the journal - we can't commit this
   * transaction - it will be invalid and, if successful, will just end
   * up propagating the write error out to the filesystem. */
  if (unlikely (!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning(s, "journal-615: buffer write failed") ;
#endif
    retval = -EIO;
  }
  bforget(jl->j_commit_bh) ;
  if (journal->j_last_commit_id != 0 &&
     (jl->j_trans_id - journal->j_last_commit_id) != 1) {
      reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                       journal->j_last_commit_id,
                       jl->j_trans_id);
  }
  journal->j_last_commit_id = jl->j_trans_id;

  /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
  cleanup_freed_for_journal_list(s, jl) ;

  retval = retval ? retval : journal->j_errno;

  /* mark the metadata dirty */
  if (!retval)
    dirty_one_transaction(s, jl);
  atomic_dec(&(jl->j_commit_left)) ;

  if (flushall) {
    atomic_set(&(jl->j_older_commits_done), 1) ;
  }
  up(&jl->j_commit_lock);
put_jl:
  put_journal_list(s, jl);

  if (retval)
    reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
  return retval;
}
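/*
** Worked example of the log-block math above (hypothetical numbers): with
** jl->j_start == 8190, jl->j_len == 3 and an on-disk journal of 8192
** blocks, the submit loop touches offsets (8190 + i) % 8192 for
** i = 0..3, i.e. 8190, 8191, 0, 1 (description block plus three log
** blocks), and the commit block written afterwards lives at
** (8190 + 3 + 1) % 8192 == 2.
*/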
/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
  struct super_block *sb = cn->sb;
  b_blocknr_t blocknr = cn->blocknr ;

  cn = cn->hprev ;
  while (cn) {
    if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
      return cn->jlist ;
    }
    cn = cn->hprev ;
  }
  return NULL ;
}

static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long, int);
/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn, *last ;
  cn = jl->j_realblock ;

  /* which is better, to lock once around the whole loop, or
  ** to lock for each call to remove_journal_hash?
  */
  while (cn) {
    if (cn->blocknr != 0) {
      if (debug) {
        reiserfs_warning (p_s_sb, "block %u, bh is %d, state %ld", cn->blocknr,
                          cn->bh ? 1: 0, cn->state) ;
      }
      cn->state = 0 ;
      remove_journal_hash(p_s_sb, journal->j_list_hash_table, jl, cn->blocknr, 1) ;
    }
    last = cn ;
    cn = cn->next ;
    free_cnode(p_s_sb, last) ;
  }
  jl->j_realblock = NULL ;
}
/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
  struct reiserfs_journal_header *jh ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  if (reiserfs_is_journal_aborted (journal))
    return -EIO;

  if (trans_id >= journal->j_last_flush_trans_id) {
    if (buffer_locked((journal->j_header_bh)))  {
      wait_on_buffer((journal->j_header_bh)) ;
      if (unlikely (!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
        reiserfs_warning (p_s_sb, "journal-699: buffer write failed") ;
#endif
        return -EIO;
      }
    }
    journal->j_last_flush_trans_id = trans_id ;
    journal->j_first_unflushed_offset = offset ;
    jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
    jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
    jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
    jh->j_mount_id = cpu_to_le32(journal->j_mount_id) ;

    if (reiserfs_barrier_flush(p_s_sb)) {
        int ret;
        lock_buffer(journal->j_header_bh);
        ret = submit_barrier_buffer(journal->j_header_bh);
        if (ret == -EOPNOTSUPP) {
            set_buffer_uptodate(journal->j_header_bh);
            disable_barrier(p_s_sb);
            goto sync;
        }
        wait_on_buffer(journal->j_header_bh);
        check_barrier_completion(p_s_sb, journal->j_header_bh);
    } else {
sync:
        set_buffer_dirty(journal->j_header_bh) ;
        sync_dirty_buffer(journal->j_header_bh) ;
    }
    if (!buffer_uptodate(journal->j_header_bh)) {
      reiserfs_warning (p_s_sb, "journal-837: IO error during journal replay");
      return -EIO ;
    }
  }
  return 0 ;
}

static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id) {
    return _update_journal_header_block(p_s_sb, offset, trans_id);
}
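/*
** Worked example (hypothetical numbers): after flushing a list with
** j_start == 100 and j_len == 10, flush_journal_list() below calls this
** with offset == (100 + 10 + 2) % SB_ONDISK_JOURNAL_SIZE(s) == 112 --
** one block past that transaction's commit block -- so a crash from here
** on replays nothing older than the next transaction.
*/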
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
                                     struct reiserfs_journal_list *jl)
{
    struct list_head *entry;
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    unsigned long trans_id = jl->j_trans_id;

    /* we know we are the only ones flushing things, no extra race
     * protection is required.
     */
restart:
    entry = journal->j_journal_list.next;
    /* did we wrap? */
    if (entry == &journal->j_journal_list)
        return 0;
    other_jl = JOURNAL_LIST_ENTRY(entry);
    if (other_jl->j_trans_id < trans_id) {
        BUG_ON (other_jl->j_refcount <= 0);
        /* do not flush all */
        flush_journal_list(p_s_sb, other_jl, 0) ;

        /* other_jl is now deleted from the list */
        goto restart;
    }
    return 0 ;
}
static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    if (!list_empty(&jl->j_working_list)) {
        list_del_init(&jl->j_working_list);
        journal->j_num_work_lists--;
    }
}
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called
** from do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) {
  struct reiserfs_journal_list *pjl ;
  struct reiserfs_journal_cnode *cn, *last ;
  int count ;
  int was_jwait = 0 ;
  int was_dirty = 0 ;
  struct buffer_head *saved_bh ;
  unsigned long j_len_saved = jl->j_len ;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int err = 0;

  BUG_ON (j_len_saved <= 0);

  if (atomic_read(&journal->j_wcount) != 0) {
    reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d",
                      atomic_read(&journal->j_wcount)) ;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* if flushall == 0, the lock is already held */
  if (flushall) {
      down(&journal->j_flush_sem);
  } else if (!down_trylock(&journal->j_flush_sem)) {
      BUG();
  }

  count = 0 ;
  if (j_len_saved > journal->j_trans_max) {
    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, trans id %lu\n", j_len_saved, jl->j_trans_id);
    return 0 ;
  }

  /* if all the work is already done, get out of here */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* start by putting the commit list on disk.  This will also flush
  ** the commit lists of any older transactions
  */
  flush_commit_list(s, jl, 1) ;

  if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted (journal))
      BUG();

  /* are we done now? */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* loop through each cnode, see if we need to write it,
  ** or wait on a more recent transaction, or just ignore it
  */
  if (atomic_read(&(journal->j_wcount)) != 0) {
    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
  }
  cn = jl->j_realblock ;
  while (cn) {
    was_jwait = 0 ;
    was_dirty = 0 ;
    saved_bh = NULL ;
    /* blocknr of 0 is no longer in the hash, ignore it */
    if (cn->blocknr == 0) {
      goto free_cnode ;
    }

    /* This transaction failed commit. Don't write out to the disk */
    if (!(jl->j_state & LIST_DIRTY))
        goto free_cnode;

    pjl = find_newer_jl_for_cn(cn) ;
    /* the order is important here.  We check pjl to make sure we
    ** don't clear BH_JDirty_wait if we aren't the one writing this
    ** block to disk
    */
    if (!pjl && cn->bh) {
      saved_bh = cn->bh ;

      /* we do this to make sure nobody releases the buffer while
      ** we are working with it
      */
      get_bh(saved_bh) ;

      if (buffer_journal_dirty(saved_bh)) {
        BUG_ON (!can_dirty (cn));
        was_jwait = 1 ;
        was_dirty = 1 ;
      } else if (can_dirty(cn)) {
        /* everything with !pjl && jwait should be writable */
        BUG();
      }
    }

    /* if someone has this block in a newer transaction, just make
    ** sure they are committed, and don't try writing it to disk
    */
    if (pjl) {
      if (atomic_read(&pjl->j_commit_left))
        flush_commit_list(s, pjl, 1) ;
      goto free_cnode ;
    }

    /* bh == NULL when the block got to disk on its own, OR,
    ** the block got freed in a future transaction
    */
    if (saved_bh == NULL) {
      goto free_cnode ;
    }

    /* this should never happen.  kupdate_one_transaction has this list
    ** locked while it works, so we should never see a buffer here that
    ** is not marked JDirty_wait
    */
    if ((!was_jwait) && !buffer_locked(saved_bh)) {
        reiserfs_warning (s, "journal-813: BAD! buffer %llu %cdirty %cjwait, "
                          "not in a newer transaction",
                          (unsigned long long)saved_bh->b_blocknr,
                          was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
    }
    if (was_dirty) {
      /* we inc again because saved_bh gets decremented at free_cnode */
      get_bh(saved_bh) ;
      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
      lock_buffer(saved_bh);
      BUG_ON (cn->blocknr != saved_bh->b_blocknr);
      if (buffer_dirty(saved_bh))
        submit_logged_buffer(saved_bh) ;
      else
        unlock_buffer(saved_bh);
      count++ ;
    } else {
      reiserfs_warning (s, "clm-2082: Unable to flush buffer %llu in %s",
                        (unsigned long long)saved_bh->b_blocknr, __FUNCTION__);
    }
free_cnode:
    last = cn ;
    cn = cn->next ;
    if (saved_bh) {
      /* we incremented this to keep others from taking the buffer head away */
      put_bh(saved_bh) ;
      if (atomic_read(&(saved_bh->b_count)) < 0) {
        reiserfs_warning (s, "journal-945: saved_bh->b_count < 0");
      }
    }
  }
  if (count > 0) {
    cn = jl->j_realblock ;
    while (cn) {
      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
        }
        wait_on_buffer(cn->bh) ;
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
        }
        if (unlikely (!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
          reiserfs_warning(s, "journal-949: buffer write failed\n") ;
#endif
          err = -EIO;
        }
        /* note, we must clear the JDirty_wait bit after the up to date
        ** check, otherwise we race against our flushpage routine
        */
        BUG_ON (!test_clear_buffer_journal_dirty (cn->bh));

        /* undo the inc from journal_mark_dirty */
        put_bh(cn->bh) ;
        brelse(cn->bh) ;
      }
      cn = cn->next ;
    }
  }

  if (err)
    reiserfs_abort (s, -EIO, "Write error while pushing transaction to disk in %s", __FUNCTION__);
flush_older_and_return:

  /* before we can update the journal header block, we _must_ flush all
  ** real blocks from all older transactions to disk.  This is because
  ** once the header block is updated, this transaction will not be
  ** replayed after a crash
  */
  if (flushall) {
    flush_older_journal_lists(s, jl);
  }

  err = journal->j_errno;
  /* before we can remove everything from the hash tables for this
  ** transaction, we must make sure it can never be replayed
  **
  ** since we are only called from do_journal_end, we know for sure there
  ** are no allocations going on while we are flushing journal lists.  So,
  ** we only need to update the journal header block for the last list
  ** being flushed
  */
  if (!err && flushall) {
    err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
    if (err)
        reiserfs_abort (s, -EIO, "Write error while updating journal header in %s", __FUNCTION__);
  }
  remove_all_from_journal_list(s, jl, 0) ;
  list_del_init(&jl->j_list);
  journal->j_num_lists--;
  del_from_work_list(s, jl);

  if (journal->j_last_flush_id != 0 &&
     (jl->j_trans_id - journal->j_last_flush_id) != 1) {
      reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
                       journal->j_last_flush_id,
                       jl->j_trans_id);
  }
  journal->j_last_flush_id = jl->j_trans_id;

  /* not strictly required since we are freeing the list, but it should
   * help find code using dead lists later on
   */
  jl->j_len = 0 ;
  atomic_set(&(jl->j_nonzerolen), 0) ;
  jl->j_start = 0 ;
  jl->j_realblock = NULL ;
  jl->j_commit_bh = NULL ;
  jl->j_trans_id = 0 ;
  jl->j_state = 0;
  put_journal_list(s, jl);
  if (flushall)
    up(&journal->j_flush_sem);
  return err ;
}
static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
{
    struct reiserfs_journal_cnode *cn;
    int ret = 0 ;

    jl->j_state |= LIST_TOUCHED;
    del_from_work_list(s, jl);
    if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
        return 0;
    }

    cn = jl->j_realblock ;
    while (cn) {
        /* if the blocknr == 0, this has been cleared from the hash,
        ** skip it
        */
        if (cn->blocknr == 0) {
            goto next ;
        }
        if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
            struct buffer_head *tmp_bh;
            /* we can race against journal_mark_freed when we try
             * to lock_buffer(cn->bh), so we have to inc the buffer
             * count, and recheck things after locking
             */
            tmp_bh = cn->bh;
            get_bh(tmp_bh);
            lock_buffer(tmp_bh);
            if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
                if (!buffer_journal_dirty(tmp_bh) ||
                    buffer_journal_prepared(tmp_bh))
                    BUG();
                add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
                ret++;
            } else {
                /* note, cn->bh might be null now */
                unlock_buffer(tmp_bh);
            }
            put_bh(tmp_bh);
        }
next:
        cn = cn->next ;
        cond_resched();
    }
    return ret ;
}
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl)
{
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_journal_list *pjl;
    int ret = 0 ;

    jl->j_state |= LIST_DIRTY;
    cn = jl->j_realblock ;
    while (cn) {
        /* look for a more recent transaction that logged this
        ** buffer.  Only the most recent transaction with a buffer in
        ** it is allowed to send that buffer to disk
        */
        pjl = find_newer_jl_for_cn(cn) ;
        if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh))
        {
            BUG_ON (!can_dirty(cn));
            /* if the buffer is prepared, it will either be logged
             * or restored.  If restored, we need to make sure
             * it actually gets marked dirty
             */
            clear_buffer_journal_new (cn->bh);
            if (buffer_journal_prepared (cn->bh)) {
                set_buffer_journal_restore_dirty (cn->bh);
            } else {
                set_buffer_journal_test (cn->bh);
                mark_buffer_dirty(cn->bh);
            }
        }
        cn = cn->next ;
    }
    return ret ;
}
static int kupdate_transactions(struct super_block *s,
                                struct reiserfs_journal_list *jl,
                                struct reiserfs_journal_list **next_jl,
                                unsigned long *next_trans_id,
                                int num_blocks,
                                int num_trans) {
    int ret = 0;
    int written = 0 ;
    int transactions_flushed = 0;
    unsigned long orig_trans_id = jl->j_trans_id;
    struct buffer_chunk chunk;
    struct list_head *entry;
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    chunk.nr = 0;

    down(&journal->j_flush_sem);
    if (!journal_list_still_alive(s, orig_trans_id)) {
        goto done;
    }

    /* we've got j_flush_sem held, nobody is going to delete any
     * of these lists out from underneath us
     */
    while ((num_trans && transactions_flushed < num_trans) ||
           (!num_trans && written < num_blocks)) {

        if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
            atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY))
        {
            del_from_work_list(s, jl);
            break;
        }
        ret = write_one_transaction(s, jl, &chunk);

        if (ret < 0)
            goto done;
        transactions_flushed++;
        written += ret;
        entry = jl->j_list.next;

        /* did we wrap? */
        if (entry == &journal->j_journal_list) {
            break;
        }
        jl = JOURNAL_LIST_ENTRY(entry);

        /* don't bother with older transactions */
        if (jl->j_trans_id <= orig_trans_id)
            break;
    }
    if (chunk.nr) {
        write_chunk(&chunk);
    }

done:
    up(&journal->j_flush_sem);
    *next_jl = jl;
    *next_trans_id = jl->j_trans_id;
    return ret;
}
/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
                                    struct reiserfs_journal_list *jl) {
    unsigned long len = 0;
    unsigned long cur_len;
    int ret;
    int i;
    int limit = 256;
    struct reiserfs_journal_list *tjl;
    struct reiserfs_journal_list *flush_jl;
    unsigned long trans_id;
    struct reiserfs_journal *journal = SB_JOURNAL (s);

    flush_jl = tjl = jl;

    /* in data logging mode, try harder to flush a lot of blocks */
    if (reiserfs_data_log(s))
        limit = 1024;
    /* flush for 256 transactions or limit blocks, whichever comes first */
    for (i = 0 ; i < 256 && len < limit ; i++) {
        if (atomic_read(&tjl->j_commit_left) ||
            tjl->j_trans_id < jl->j_trans_id) {
            break;
        }
        cur_len = atomic_read(&tjl->j_nonzerolen);
        if (cur_len > 0) {
            tjl->j_state &= ~LIST_TOUCHED;
        }
        len += cur_len;
        flush_jl = tjl;
        if (tjl->j_list.next == &journal->j_journal_list)
            break;
        tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
    }
    /* try to find a group of blocks we can flush across all the
    ** transactions, but only bother if we've actually spanned
    ** across multiple lists
    */
    if (flush_jl != jl) {
        ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
    }
    flush_journal_list(s, flush_jl, 1);
    return 0;
}
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
                         struct reiserfs_journal_cnode **table,
                         struct reiserfs_journal_list *jl,
                         unsigned long block, int remove_freed)
{
  struct reiserfs_journal_cnode *cur ;
  struct reiserfs_journal_cnode **head ;

  head = &(journal_hash(table, sb, block)) ;
  if (!head) {
    return ;
  }
  cur = *head ;
  while (cur) {
    if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) &&
        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
      if (cur->hnext) {
        cur->hnext->hprev = cur->hprev ;
      }
      if (cur->hprev) {
        cur->hprev->hnext = cur->hnext ;
      } else {
        *head = cur->hnext ;
      }
      cur->blocknr = 0 ;
      cur->sb = NULL ;
      cur->state = 0 ;
      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
        atomic_dec(&(cur->jlist->j_nonzerolen)) ;
      cur->bh = NULL ;
      cur->jlist = NULL ;
    }
    cur = cur->hnext ;
  }
}
static void free_journal_ram(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  reiserfs_kfree(journal->j_current_jl,
                 sizeof(struct reiserfs_journal_list), p_s_sb);
  journal->j_num_lists--;

  vfree(journal->j_cnode_free_orig) ;
  free_list_bitmaps(p_s_sb, journal->j_list_bitmap) ;
  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
  if (journal->j_header_bh) {
    brelse(journal->j_header_bh) ;
  }
  /* j_header_bh is on the journal dev, make sure not to release the journal
   * dev until we brelse j_header_bh
   */
  release_journal_dev(p_s_sb, journal);
  vfree(journal) ;
}
/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
  struct reiserfs_transaction_handle myth ;
  int flushed = 0;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

  /* we only want to flush out transactions if we were called with error == 0
  */
  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
    /* end the current trans */
    BUG_ON (!th->t_trans_id);
    do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;

    /* make sure something gets logged to force our way into the flush code */
    if (!journal_join(&myth, p_s_sb, 1)) {
        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
        journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
        do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
        flushed = 1;
    }
  }

  /* this also catches errors during the do_journal_end above */
  if (!error && reiserfs_is_journal_aborted(journal)) {
      memset(&myth, 0, sizeof(myth));
      if (!journal_join_abort(&myth, p_s_sb, 1)) {
          reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
          journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
          do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
      }
  }

  reiserfs_mounted_fs_count-- ;
  /* wait for all commits to finish */
  cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
  flush_workqueue(commit_wq);
  if (!reiserfs_mounted_fs_count) {
    destroy_workqueue(commit_wq);
    commit_wq = NULL;
  }

  free_journal_ram(p_s_sb) ;

  return 0 ;
}
/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
    return do_journal_release(th, p_s_sb, 0) ;
}
/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
    return do_journal_release(th, p_s_sb, 1) ;
}
/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit) {
  if (get_commit_trans_id (commit) != get_desc_trans_id (desc) ||
      get_commit_trans_len (commit) != get_desc_trans_len (desc) ||
      get_commit_trans_len (commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
      get_commit_trans_len (commit) <= 0
  ) {
    return 1 ;
  }
  return 0 ;
}
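/*
** On-disk layout sketch (illustrative): a transaction of length L occupies
** L + 2 log blocks -- a description block, the L logged blocks, then the
** commit block:
**
**   offset:   d      d+1 .. d+L      d+L+1
**   content:  desc   logged blocks   commit
**
** which is why the code below locates the commit block at
** (offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb).
*/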
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ;
  unsigned long offset ;

  if (!d_bh)
      return 0 ;

  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8)) {
    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
                     "is valid returning because trans_id %d is greater than "
                     "oldest_invalid %lu", get_desc_trans_id(desc),
                     *oldest_invalid_trans_id);
      return 0 ;
    }
    if (newest_mount_id && *newest_mount_id > get_desc_mount_id (desc)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
                     "is valid returning because mount_id %d is less than "
                     "newest_mount_id %lu", get_desc_mount_id (desc),
                     *newest_mount_id) ;
      return -1 ;
    }
    if ( get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max ) {
      reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction", get_desc_trans_len(desc));
      return -1 ;
    }
    offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;

    /* ok, we have a journal description block, lets see if the transaction was valid */
    c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                         ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    if (!c_bh)
      return 0 ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                     "journal_transaction_is_valid, commit offset %ld had bad "
                     "time %d or length %d",
                     c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                     get_commit_trans_id (commit),
                     get_commit_trans_len(commit));
      brelse(c_bh) ;
      if (oldest_invalid_trans_id) {
        *oldest_invalid_trans_id = get_desc_trans_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
                       "transaction_is_valid setting oldest invalid trans_id "
                       "to %d", get_desc_trans_id(desc)) ;
      }
      return -1;
    }
    brelse(c_bh) ;
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
                   "transaction start offset %llu, len %d id %d",
                   d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_desc_trans_len(desc), get_desc_trans_id(desc)) ;
    return 1 ;
  } else {
    return 0 ;
  }
}
static void brelse_array(struct buffer_head **heads, int num) {
  int i ;
  for (i = 0 ; i < num ; i++) {
    brelse(heads[i]) ;
  }
}
1872 ** given the start, and values for the oldest acceptable transactions,
1873 ** this either reads in a replays a transaction, or returns because the transaction
1874 ** is invalid, or too old.
1876 static int journal_read_transaction(struct super_block
*p_s_sb
, unsigned long cur_dblock
, unsigned long oldest_start
,
1877 unsigned long oldest_trans_id
, unsigned long newest_mount_id
) {
1878 struct reiserfs_journal
*journal
= SB_JOURNAL (p_s_sb
);
1879 struct reiserfs_journal_desc
*desc
;
1880 struct reiserfs_journal_commit
*commit
;
1881 unsigned long trans_id
= 0 ;
1882 struct buffer_head
*c_bh
;
1883 struct buffer_head
*d_bh
;
1884 struct buffer_head
**log_blocks
= NULL
;
1885 struct buffer_head
**real_blocks
= NULL
;
1886 unsigned long trans_offset
;
1890 d_bh
= journal_bread(p_s_sb
, cur_dblock
) ;
1893 desc
= (struct reiserfs_journal_desc
*)d_bh
->b_data
;
1894 trans_offset
= d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) ;
1895 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
, "journal-1037: "
1896 "journal_read_transaction, offset %llu, len %d mount_id %d",
1897 d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
),
1898 get_desc_trans_len(desc
), get_desc_mount_id(desc
)) ;
1899 if (get_desc_trans_id(desc
) < oldest_trans_id
) {
1900 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
, "journal-1039: "
1901 "journal_read_trans skipping because %lu is too old",
1902 cur_dblock
- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
)) ;
1906 if (get_desc_mount_id(desc
) != newest_mount_id
) {
1907 reiserfs_debug(p_s_sb
, REISERFS_DEBUG_CODE
, "journal-1146: "
1908 "journal_read_trans skipping because %d is != "
1909 "newest_mount_id %lu", get_desc_mount_id(desc
),
1914 c_bh
= journal_bread(p_s_sb
, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
1915 ((trans_offset
+ get_desc_trans_len(desc
) + 1) %
1916 SB_ONDISK_JOURNAL_SIZE(p_s_sb
))) ;
1921 commit
= (struct reiserfs_journal_commit
*)c_bh
->b_data
;
  if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
                   "commit offset %llu had bad time %d or length %d",
                   c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_commit_trans_id(commit), get_commit_trans_len(commit));
    brelse(c_bh) ;
    brelse(d_bh) ;
    return 1 ;
  }
  trans_id = get_desc_trans_id(desc) ;
  /* now we know we've got a good transaction, and it was inside the valid time ranges */
  log_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  real_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  if (!log_blocks || !real_blocks) {
    brelse(c_bh) ;
    brelse(d_bh) ;
    reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS") ;
    return -1 ;
  }
  /* get all the buffer heads */
  trans_half = journal_trans_half(p_s_sb->s_blocksize) ;
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    log_blocks[i] = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (i < trans_half) {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
    } else {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - trans_half])) ;
    }
    if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
      reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
      goto abort_replay;
    }
    /* make sure we don't try to replay onto log or reserved area */
    if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
      reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block") ;
abort_replay:
      brelse_array(log_blocks, i) ;
      brelse_array(real_blocks, i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
  }
  /* read in the log blocks, memcpy to the corresponding real block */
  ll_rw_block(READ, get_desc_trans_len(desc), log_blocks) ;
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(log_blocks[i]) ;
    if (!buffer_uptodate(log_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer write failed") ;
      brelse_array(log_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse_array(real_blocks, get_desc_trans_len(desc)) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
    set_buffer_uptodate(real_blocks[i]) ;
    brelse(log_blocks[i]) ;
  }
  /* flush out the real blocks */
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    set_buffer_dirty(real_blocks[i]) ;
    ll_rw_block(WRITE, 1, real_blocks + i) ;
  }
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(real_blocks[i]) ;
    if (!buffer_uptodate(real_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed") ;
      brelse_array(real_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    brelse(real_blocks[i]) ;
  }
  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
                 "start to offset %ld",
                 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
  /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
  journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  journal->j_last_flush_trans_id = trans_id ;
  journal->j_trans_id = trans_id + 1 ;
  brelse(c_bh) ;
  brelse(d_bh) ;
  reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  return 0 ;
}
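
/* A sketch of the offset arithmetic used during replay above.  This helper is
** hypothetical (nothing in this file defines or calls it); it only restates
** how a transaction body block is located, including wrap-around at the end
** of the on-disk log.
*/
#if 0
static inline unsigned long replay_log_block(struct super_block *sb,
                                             unsigned long trans_offset, int i)
{
  /* body block i sits 1 + i blocks past the descriptor, modulo the
  ** on-disk journal size, so long transactions wrap to the log start */
  return SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
         (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(sb);
}
#endif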
/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
                                             unsigned int max_block)
{
  struct buffer_head * bhlist[BUFNR];
  unsigned int blocks = BUFNR;
  struct buffer_head * bh;
  int i, j;

  bh = __getblk (dev, block, bufsize);
  if (buffer_uptodate (bh))
    return (bh);

  if (block + BUFNR > max_block) {
    blocks = max_block - block;
  }
  bhlist[0] = bh;
  j = 1;
  for (i = 1; i < blocks; i++) {
    bh = __getblk (dev, block + i, bufsize);
    if (buffer_uptodate (bh)) {
      brelse (bh);
      break;
    }
    else bhlist[j++] = bh;
  }
  ll_rw_block (READ, j, bhlist);
  for(i = 1; i < j; i++)
    brelse (bhlist[i]);
  bh = bhlist[0];
  wait_on_buffer (bh);
  if (buffer_uptodate (bh))
    return bh;
  brelse (bh);
  return NULL;
}
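
/* Example use of reiserfs_breada, mirroring what journal_read does below:
** read ahead up to BUFNR journal blocks, but never past the end of the
** on-disk log.  Illustrative only.
*/
#if 0
  bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, sb->s_blocksize,
                       SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                       SB_ONDISK_JOURNAL_SIZE(sb));
#endif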
/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_desc *desc ;
  unsigned long oldest_trans_id = 0;
  unsigned long oldest_invalid_trans_id = 0 ;
  time_t start ;
  unsigned long oldest_start = 0;
  unsigned long cur_dblock = 0 ;
  unsigned long newest_mount_id = 9 ;
  struct buffer_head *d_bh ;
  struct reiserfs_journal_header *jh ;
  int valid_journal_header = 0 ;
  int replay_count = 0 ;
  int continue_replay = 1 ;
  int ret ;
  char b[BDEVNAME_SIZE];

  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_info (p_s_sb, "checking transaction log (%s)\n",
                 bdevname(journal->j_dev_bd, b));
  start = get_seconds();

  /* step 1, read in the journal header block. Check the transaction it says
  ** is the first unflushed, and if that transaction is not valid,
  ** replay is done
  */
  journal->j_header_bh = journal_bread(p_s_sb,
                                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                       SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!journal->j_header_bh) {
    return 1 ;
  }
  jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
  if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
      le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
      le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
    oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                   le32_to_cpu(jh->j_first_unflushed_offset) ;
    oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    newest_mount_id = le32_to_cpu(jh->j_mount_id);
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
                   "header: first_unflushed_offset %d, last_flushed_trans_id "
                   "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
                   le32_to_cpu(jh->j_last_flush_trans_id)) ;
    valid_journal_header = 1 ;

    /* now, we try to read the first unflushed offset. If it is not valid,
    ** there is nothing more we can do, and it makes no sense to read
    ** through the whole log.
    */
    d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
    if (!ret) {
      continue_replay = 0 ;
    }
    brelse(d_bh) ;
    goto start_log_replay;
  }

  if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
    reiserfs_warning (p_s_sb,
                      "clm-2076: device is readonly, unable to replay log") ;
    return -1 ;
  }

  /* ok, there are transactions that need to be replayed. start with the first log block, find
  ** all the valid transactions, and pick out the oldest.
  */
  while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
    /* Note that it is required for blocksize of primary fs device and journal
       device to be the same */
    d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
    if (ret == 1) {
      desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
      if (oldest_start == 0) { /* init all oldest_ values */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
                       "oldest_start to offset %llu, trans_id %lu",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      } else if (oldest_trans_id > get_desc_trans_id(desc)) {
        /* one we just read was older */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
                       "oldest_start to offset %lu, trans_id %lu",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      }
      if (newest_mount_id < get_desc_mount_id(desc)) {
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                       "newest_mount_id to %d", get_desc_mount_id(desc));
      }
      cur_dblock += get_desc_trans_len(desc) + 2 ;
    } else {
      cur_dblock++ ;
    }
    brelse(d_bh) ;
  }

start_log_replay:
  cur_dblock = oldest_start ;
  if (oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
                   "from offset %llu, trans_id %lu",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   oldest_trans_id) ;
  }
  replay_count = 0 ;
  while(continue_replay && oldest_trans_id > 0) {
    ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
    if (ret < 0) {
      return ret ;
    } else if (ret != 0) {
      break ;
    }
    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start ;
    replay_count++ ;
    if (cur_dblock == oldest_start)
      break;
  }

  if (oldest_trans_id == 0) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
                   "transactions found") ;
  }
  /* j_start does not get set correctly if we don't replay any transactions.
  ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
  ** copy the trans_id from the header
  */
  if (valid_journal_header && replay_count == 0) {
    journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
    journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
    journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
  } else {
    journal->j_mount_id = newest_mount_id + 1 ;
  }
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                 "newest_mount_id to %lu", journal->j_mount_id) ;
  journal->j_first_unflushed_offset = journal->j_start ;
  if (replay_count > 0) {
    reiserfs_info (p_s_sb, "replayed %d transactions in %lu seconds\n",
                   replay_count, get_seconds() - start) ;
  }
  if (!bdev_read_only(p_s_sb->s_bdev) &&
      _update_journal_header_block(p_s_sb, journal->j_start,
                                   journal->j_last_flush_trans_id))
  {
    /* replay failed, caller must call free_journal_ram and abort
    ** the mount
    */
    return -1 ;
  }
  return 0 ;
}
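
/* The replay decision above boils down to three journal header fields.  A
** hedged restatement of the test, with jh as read by journal_read; sketch
** only, not called anywhere:
*/
#if 0
  if (le32_to_cpu(jh->j_first_unflushed_offset) <
          SB_ONDISK_JOURNAL_SIZE(sb) &&
      le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
    /* header looks sane: probe the first unflushed transaction and
    ** replay only if its descriptor/commit pair validates */
  }
#endif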
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
  struct reiserfs_journal_list *jl;
retry:
  jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS, s);
  if (!jl) {
    yield();
    goto retry;
  }
  memset(jl, 0, sizeof(*jl));
  INIT_LIST_HEAD(&jl->j_list);
  INIT_LIST_HEAD(&jl->j_working_list);
  INIT_LIST_HEAD(&jl->j_tail_bh_list);
  INIT_LIST_HEAD(&jl->j_bh_list);
  sema_init(&jl->j_commit_lock, 1);
  SB_JOURNAL(s)->j_num_lists++;
  get_journal_list(jl);
  return jl;
}

static void journal_list_init(struct super_block *p_s_sb) {
  SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
}
static int release_journal_dev( struct super_block *super,
                                struct reiserfs_journal *journal )
{
  int result;

  result = 0;

  if( journal -> j_dev_file != NULL ) {
    result = filp_close( journal -> j_dev_file, NULL );
    journal -> j_dev_file = NULL;
    journal -> j_dev_bd = NULL;
  } else if( journal -> j_dev_bd != NULL ) {
    result = blkdev_put( journal -> j_dev_bd );
    journal -> j_dev_bd = NULL;
  }

  if( result != 0 ) {
    reiserfs_warning(super, "sh-457: release_journal_dev: Cannot release journal device: %i", result );
  }
  return result;
}
static int journal_init_dev( struct super_block *super,
                             struct reiserfs_journal *journal,
                             const char *jdev_name )
{
  int result;
  dev_t jdev;
  int blkdev_mode = FMODE_READ | FMODE_WRITE;
  char b[BDEVNAME_SIZE];

  result = 0;

  journal -> j_dev_bd = NULL;
  journal -> j_dev_file = NULL;
  jdev = SB_ONDISK_JOURNAL_DEVICE( super ) ?
         new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

  if (bdev_read_only(super->s_bdev))
    blkdev_mode = FMODE_READ;

  /* there is no "jdev" option and journal is on separate device */
  if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
    journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
    if (IS_ERR(journal->j_dev_bd)) {
      result = PTR_ERR(journal->j_dev_bd);
      journal->j_dev_bd = NULL;
      reiserfs_warning (super, "sh-458: journal_init_dev: "
                        "cannot init journal device '%s': %i",
                        __bdevname(jdev, b), result );
      return result;
    } else if (jdev != super->s_dev)
      set_blocksize(journal->j_dev_bd, super->s_blocksize);
    return 0;
  }

  journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
  if( !IS_ERR( journal -> j_dev_file ) ) {
    struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
    if( !S_ISBLK( jdev_inode -> i_mode ) ) {
      reiserfs_warning(super, "journal_init_dev: '%s' is "
                       "not a block device", jdev_name );
      result = -ENOTBLK;
      release_journal_dev( super, journal );
    } else {
      /* ok */
      journal->j_dev_bd = I_BDEV(jdev_inode);
      set_blocksize(journal->j_dev_bd, super->s_blocksize);
      reiserfs_info(super, "journal_init_dev: journal device: %s\n",
                    bdevname(journal->j_dev_bd, b));
    }
  } else {
    result = PTR_ERR( journal -> j_dev_file );
    journal -> j_dev_file = NULL;
    reiserfs_warning (super,
                      "journal_init_dev: Cannot open '%s': %i",
                      jdev_name, result );
  }
  return result;
}
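
/* Typical use, as journal_init does below: when no "jdev" name was given, the
** helper falls back to the main filesystem device.  Illustrative only.
*/
#if 0
  if (journal_init_dev(sb, journal, jdev_name) != 0)
    goto free_and_return;  /* journal device unusable */
#endif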
/*
** must be called once on fs mount. calls journal_read for you
*/
int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format, unsigned int commit_max_age) {
  int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
  struct buffer_head *bhjh;
  struct reiserfs_super_block * rs;
  struct reiserfs_journal_header *jh;
  struct reiserfs_journal *journal;
  struct reiserfs_journal_list *jl;
  char b[BDEVNAME_SIZE];

  journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
  if (!journal) {
    reiserfs_warning (p_s_sb, "journal-1256: unable to get memory for journal structure") ;
    return 1 ;
  }
  memset(journal, 0, sizeof(struct reiserfs_journal)) ;
  INIT_LIST_HEAD(&journal->j_bitmap_nodes) ;
  INIT_LIST_HEAD (&journal->j_prealloc_list);
  INIT_LIST_HEAD(&journal->j_working_list);
  INIT_LIST_HEAD(&journal->j_journal_list);
  journal->j_persistent_trans = 0;
  if (reiserfs_allocate_list_bitmaps(p_s_sb,
                                     journal->j_list_bitmap,
                                     SB_BMAP_NR(p_s_sb)))
    goto free_and_return ;
  allocate_bitmap_nodes(p_s_sb) ;

  /* reserved for journal area support */
  SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
                                           REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
                                           SB_BMAP_NR(p_s_sb) + 1 :
                                           REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2);

  /* Sanity check to see if the standard journal fits within the area
     addressed by the first bitmap block (relevant for small blocksizes) */
  if ( !SB_ONDISK_JOURNAL_DEVICE( p_s_sb ) &&
       (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8) ) {
    reiserfs_warning (p_s_sb, "journal-1393: journal does not fit for area "
                      "addressed by first of bitmap blocks. It starts at "
                      "%u and its size is %u. Block size %ld",
                      SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
                      SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize);
    goto free_and_return ;
  }

  if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
    reiserfs_warning (p_s_sb, "sh-462: unable to initialize journal device");
    goto free_and_return;
  }

  rs = SB_DISK_SUPER_BLOCK(p_s_sb);

  /* read journal header */
  bhjh = journal_bread(p_s_sb,
                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!bhjh) {
    reiserfs_warning (p_s_sb, "sh-459: unable to read journal header");
    goto free_and_return;
  }
  jh = (struct reiserfs_journal_header *)(bhjh->b_data);

  /* make sure that journal matches to the super block */
  if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) {
    reiserfs_warning (p_s_sb, "sh-460: journal header magic %x "
                      "(device %s) does not match to magic found in super "
                      "block %x",
                      jh->jh_journal.jp_journal_magic,
                      bdevname( journal->j_dev_bd, b),
                      sb_jp_journal_magic(rs));
    brelse (bhjh);
    goto free_and_return;
  }

  journal->j_trans_max      = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
  journal->j_max_batch      = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
  journal->j_max_commit_age = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
  journal->j_max_trans_age  = JOURNAL_MAX_TRANS_AGE;

  if (journal->j_trans_max) {
    /* make sure these parameters are available, assign them if they are not */
    __u32 initial = journal->j_trans_max;
    int ratio = 1;

    if (p_s_sb->s_blocksize < 4096)
      ratio = 4096 / p_s_sb->s_blocksize;

    if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/journal->j_trans_max < JOURNAL_MIN_RATIO)
      journal->j_trans_max = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
    if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
      journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
    if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
      journal->j_trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;

    if (journal->j_trans_max != initial)
      reiserfs_warning (p_s_sb, "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
                        initial, journal->j_trans_max);

    journal->j_max_batch = journal->j_trans_max *
                           JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
  }

  if (!journal->j_trans_max) {
    /* the file system was created by an old version of mkreiserfs,
       so this field contains a zero value */
    journal->j_trans_max      = JOURNAL_TRANS_MAX_DEFAULT ;
    journal->j_max_batch      = JOURNAL_MAX_BATCH_DEFAULT ;
    journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE ;

    /* for blocksize >= 4096 - max transaction size is 1024. For block size < 4096
       trans max size is decreased proportionally */
    if (p_s_sb->s_blocksize < 4096) {
      journal->j_trans_max /= (4096 / p_s_sb->s_blocksize) ;
      journal->j_max_batch = (journal->j_trans_max) * 9 / 10 ;
    }
  }

  journal->j_default_max_commit_age = journal->j_max_commit_age;

  if (commit_max_age != 0) {
    journal->j_max_commit_age = commit_max_age;
    journal->j_max_trans_age = commit_max_age;
  }

  reiserfs_info (p_s_sb, "journal params: device %s, size %u, "
                 "journal first block %u, max trans len %u, max batch %u, "
                 "max commit age %u, max trans age %u\n",
                 bdevname( journal->j_dev_bd, b),
                 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
                 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 journal->j_trans_max,
                 journal->j_max_batch,
                 journal->j_max_commit_age,
                 journal->j_max_trans_age);

  brelse (bhjh);

  journal->j_list_bitmap_index = 0 ;
  journal_list_init(p_s_sb) ;

  memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;

  INIT_LIST_HEAD(&journal->j_dirty_buffers) ;
  spin_lock_init(&journal->j_dirty_buffers_lock) ;

  journal->j_start = 0 ;
  journal->j_len = 0 ;
  journal->j_len_alloc = 0 ;
  atomic_set(&(journal->j_wcount), 0) ;
  atomic_set(&(journal->j_async_throttle), 0) ;
  journal->j_bcount = 0 ;
  journal->j_trans_start_time = 0 ;
  journal->j_last = NULL ;
  journal->j_first = NULL ;
  init_waitqueue_head(&(journal->j_join_wait)) ;
  sema_init(&journal->j_lock, 1);
  sema_init(&journal->j_flush_sem, 1);

  journal->j_trans_id = 10 ;
  journal->j_mount_id = 10 ;
  journal->j_state = 0 ;
  atomic_set(&(journal->j_jlock), 0) ;
  journal->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
  journal->j_cnode_free_orig = journal->j_cnode_free_list ;
  journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0 ;
  journal->j_cnode_used = 0 ;
  journal->j_must_wait = 0 ;

  init_journal_hash(p_s_sb) ;
  jl = journal->j_current_jl;
  jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
  if (!jl->j_list_bitmap) {
    reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0") ;
    goto free_and_return;
  }
  if (journal_read(p_s_sb) < 0) {
    reiserfs_warning(p_s_sb, "Replay Failure, unable to mount") ;
    goto free_and_return;
  }

  reiserfs_mounted_fs_count++ ;
  if (reiserfs_mounted_fs_count <= 1)
    commit_wq = create_workqueue("reiserfs");

  INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
  return 0 ;
free_and_return:
  free_journal_ram(p_s_sb);
  return 1;
}
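
/* A worked example of the transaction-size scaling above, assuming a 1k
** blocksize (so ratio = 4096/1024 = 4): j_trans_max is clamped to
** JOURNAL_TRANS_MAX_DEFAULT / 4, and j_max_batch is then derived from it.
** Sketch only:
*/
#if 0
  int ratio = 4096 / 1024;                        /* = 4 */
  journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
  journal->j_max_batch = journal->j_trans_max *
                         JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
#endif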
/*
** test for a polite end of the current transaction. Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a single
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
  time_t now = get_seconds() ;
  /* cannot restart while nested */
  BUG_ON (!th->t_trans_id);
  if (th->t_refcount > 1)
    return 0 ;
  if ( journal->j_must_wait > 0 ||
       (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
       atomic_read(&(journal->j_jlock)) ||
       (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
       journal->j_cnode_free < (journal->j_trans_max * 3)) {
    return 1 ;
  }
  return 0 ;
}
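
/* The intended caller pattern for journal_transaction_should_end, sketched
** with a hypothetical restart sequence; the real callers live outside this
** file.  Illustrative only.
*/
#if 0
  if (journal_transaction_should_end(th, blocks_needed)) {
    /* politely end and reopen so other writers can commit */
    journal_end(th, sb, blocks_reserved);
    journal_begin(th, sb, blocks_needed);
  }
#endif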
/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
  BUG_ON (!th->t_trans_id);
  journal->j_must_wait = 1 ;
  set_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
}

/* this must be called without a transaction started, and does not
** require the kernel lock
*/
void reiserfs_allow_writes(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  clear_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
  wake_up(&journal->j_join_wait) ;
}

/* this must be called without a transaction started, and does not
** require the kernel lock
*/
void reiserfs_wait_on_write_block(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  wait_event(journal->j_join_wait,
             !test_bit(J_WRITERS_BLOCKED, &journal->j_state)) ;
}

static void queue_log_writer(struct super_block *s) {
  wait_queue_t wait;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  set_bit(J_WRITERS_QUEUED, &journal->j_state);

  /*
   * we don't want to use wait_event here because
   * we only want to wait once.
   */
  init_waitqueue_entry(&wait, current);
  add_wait_queue(&journal->j_join_wait, &wait);
  set_current_state(TASK_UNINTERRUPTIBLE);
  if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
    schedule();
  current->state = TASK_RUNNING;
  remove_wait_queue(&journal->j_join_wait, &wait);
}
static void wake_queued_writers(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
    wake_up(&journal->j_join_wait);
}

static void let_transaction_grow(struct super_block *sb,
                                 unsigned long trans_id)
{
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  unsigned long bcount = journal->j_bcount;
  while(1) {
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(1);
    journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
    while ((atomic_read(&journal->j_wcount) > 0 ||
            atomic_read(&journal->j_jlock)) &&
           journal->j_trans_id == trans_id) {
      queue_log_writer(sb);
    }
    if (journal->j_trans_id != trans_id)
      break;
    if (bcount == journal->j_bcount)
      break;
    bcount = journal->j_bcount;
  }
}
/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable. send the number of blocks you
** expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,unsigned long nblocks,int join) {
  time_t now = get_seconds() ;
  int old_trans_id ;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  struct reiserfs_transaction_handle myth;
  int sched_count = 0;
  int retval;

  reiserfs_check_lock_depth(p_s_sb, "journal_begin") ;
  if (nblocks > journal->j_trans_max)
    BUG();

  PROC_INFO_INC( p_s_sb, journal.journal_being );
  /* set here for journal_join */
  th->t_refcount = 1;
  th->t_super = p_s_sb ;

relock:
  lock_journal(p_s_sb) ;
  if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted (journal)) {
    unlock_journal (p_s_sb);
    retval = journal->j_errno;
    goto out_fail;
  }
  journal->j_bcount++;

  if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
    unlock_journal(p_s_sb) ;
    reiserfs_wait_on_write_block(p_s_sb) ;
    PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
    goto relock ;
  }
  now = get_seconds();

  /* if there is no room in the journal OR
  ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
  ** we don't sleep if there aren't other writers
  */

  if ( (!join && journal->j_must_wait > 0) ||
       ( !join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) ||
       (!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 &&
        (now - journal->j_trans_start_time) > journal->j_max_trans_age) ||
       (!join && atomic_read(&journal->j_jlock)) ||
       (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

    old_trans_id = journal->j_trans_id;
    unlock_journal(p_s_sb) ; /* allow others to finish this transaction */

    if (!join && (journal->j_len_alloc + nblocks + 2) >=
        journal->j_max_batch &&
        ((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75))
    {
      if (atomic_read(&journal->j_wcount) > 10) {
        sched_count++;
        queue_log_writer(p_s_sb);
        goto relock;
      }
    }
    /* don't mess with joining the transaction if all we have to do is
     * wait for someone else to do a commit
     */
    if (atomic_read(&journal->j_jlock)) {
      while (journal->j_trans_id == old_trans_id &&
             atomic_read(&journal->j_jlock)) {
        queue_log_writer(p_s_sb);
      }
      goto relock;
    }
    retval = journal_join(&myth, p_s_sb, 1) ;
    if (retval)
      goto out_fail;

    /* someone might have ended the transaction while we joined */
    if (old_trans_id != journal->j_trans_id) {
      retval = do_journal_end(&myth, p_s_sb, 1, 0) ;
    } else {
      retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
    }
    if (retval)
      goto out_fail;

    PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
    goto relock ;
  }
  /* we are the first writer, set trans_id */
  if (journal->j_trans_start_time == 0) {
    journal->j_trans_start_time = get_seconds();
  }
  atomic_inc(&(journal->j_wcount)) ;
  journal->j_len_alloc += nblocks ;
  th->t_blocks_logged = 0 ;
  th->t_blocks_allocated = nblocks ;
  th->t_trans_id = journal->j_trans_id ;
  unlock_journal(p_s_sb) ;
  INIT_LIST_HEAD (&th->t_list);
  return 0 ;

out_fail:
  memset (th, 0, sizeof (*th));
  /* Re-set th->t_super, so we can properly keep track of how many
   * persistent transactions there are. We need to do this so if this
   * call is part of a failed restart_transaction, we can free it later */
  th->t_super = p_s_sb;
  return retval;
}
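
/* How the join flag is used by the wrappers below, in sketch form: JBEGIN_REG
** waits politely, JBEGIN_JOIN attaches to the running transaction regardless
** of its age, and JBEGIN_ABORT joins even when the journal is aborted.
** Illustrative only.
*/
#if 0
  do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);    /* journal_begin */
  do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);   /* journal_join */
  do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);  /* journal_join_abort */
#endif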
struct reiserfs_transaction_handle *
reiserfs_persistent_transaction(struct super_block *s, int nblocks) {
  int ret ;
  struct reiserfs_transaction_handle *th ;

  /* if we're nesting into an existing transaction. It will be
  ** persistent on its own
  */
  if (reiserfs_transaction_running(s)) {
    th = current->journal_info ;
    th->t_refcount++ ;
    if (th->t_refcount < 2) {
      BUG() ;
    }
    return th ;
  }
  th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS, s) ;
  if (!th)
    return NULL ;
  ret = journal_begin(th, s, nblocks) ;
  if (ret) {
    reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
    return NULL ;
  }

  SB_JOURNAL(s)->j_persistent_trans++;
  return th ;
}

int
reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) {
  struct super_block *s = th->t_super;
  int ret;

  ret = journal_end(th, th->t_super, th->t_blocks_allocated);
  if (th->t_refcount == 0) {
    SB_JOURNAL(s)->j_persistent_trans--;
    reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
  }
  return ret;
}
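
/* Illustrative pairing of the persistent transaction API above; callers keep
** the returned handle across sleeps instead of using an on-stack one.
** Sketch only.
*/
#if 0
  struct reiserfs_transaction_handle *th;
  th = reiserfs_persistent_transaction(sb, 1);
  if (th) {
    /* ... prepare and log some buffers ... */
    reiserfs_end_persistent_transaction(th);
  }
#endif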
static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_transaction_handle *cur_th = current->journal_info;

  /* this keeps do_journal_end from NULLing out the current->journal_info
  ** pointer
  */
  th->t_handle_save = cur_th ;
  if (cur_th && cur_th->t_refcount > 1) {
    BUG() ;
  }
  return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN) ;
}

int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_transaction_handle *cur_th = current->journal_info;

  /* this keeps do_journal_end from NULLing out the current->journal_info
  ** pointer
  */
  th->t_handle_save = cur_th ;
  if (cur_th && cur_th->t_refcount > 1) {
    BUG() ;
  }
  return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT) ;
}
int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
  struct reiserfs_transaction_handle *cur_th = current->journal_info ;
  int ret ;

  th->t_handle_save = NULL ;
  if (cur_th) {
    /* we are nesting into the current transaction */
    if (cur_th->t_super == p_s_sb) {
      BUG_ON (!cur_th->t_refcount);
      cur_th->t_refcount++ ;
      memcpy(th, cur_th, sizeof(*th));
      if (th->t_refcount <= 1)
        reiserfs_warning (p_s_sb, "BAD: refcount <= 1, but journal_info != 0");
      return 0;
    } else {
      /* we've ended up with a handle from a different filesystem.
      ** save it and restore on journal_end. This should never
      ** really happen...
      */
      reiserfs_warning(p_s_sb, "clm-2100: nesting into a different FS") ;
      th->t_handle_save = current->journal_info ;
      current->journal_info = th;
    }
  } else {
    current->journal_info = th;
  }
  ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG) ;
  if (current->journal_info != th)
    BUG() ;

  /* I guess this boils down to being the reciprocal of clm-2100 above.
   * If do_journal_begin_r fails, we need to put it back, since journal_end
   * won't be called to do it. */
  if (ret)
    current->journal_info = th->t_handle_save;
  else
    BUG_ON (!th->t_refcount);

  return ret ;
}
/*
** puts bh into the current transaction. If it was already there, reorder removes the
** old pointers from the hash, and puts new ones in (to make sure replay happens in the right order).
**
** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
** transaction is committed.
**
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn = NULL;
  int count_already_incd = 0 ;
  int prepared = 0 ;
  BUG_ON (!th->t_trans_id);

  PROC_INFO_INC( p_s_sb, journal.mark_dirty );
  if (th->t_trans_id != journal->j_trans_id) {
    reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                   th->t_trans_id, journal->j_trans_id);
  }

  p_s_sb->s_dirt = 1;

  prepared = test_clear_buffer_journal_prepared (bh);
  clear_buffer_journal_restore_dirty (bh);
  /* already in this transaction, we are done */
  if (buffer_journaled(bh)) {
    PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
    return 0 ;
  }

  /* this must be turned into a panic instead of a warning. We can't allow
  ** a dirty or journal_dirty or locked buffer to be logged, as some changes
  ** could get to disk too early. NOT GOOD.
  */
  if (!prepared || buffer_dirty(bh)) {
    reiserfs_warning (p_s_sb, "journal-1777: buffer %llu bad state "
                      "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
                      (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!',
                      buffer_locked(bh) ? ' ' : '!',
                      buffer_dirty(bh) ? ' ' : '!',
                      buffer_journal_dirty(bh) ? ' ' : '!') ;
  }

  if (atomic_read(&(journal->j_wcount)) <= 0) {
    reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(journal->j_wcount))) ;
    return 1 ;
  }
  /* this error means I've screwed up, and we've overflowed the transaction.
  ** Nothing can be done here, except make the FS readonly or panic.
  */
  if (journal->j_len >= journal->j_trans_max) {
    reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", journal->j_len) ;
  }

  if (buffer_journal_dirty(bh)) {
    count_already_incd = 1 ;
    PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
    clear_buffer_journal_dirty (bh);
  }

  if (journal->j_len > journal->j_len_alloc) {
    journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT ;
  }

  set_buffer_journaled (bh);

  /* now put this guy on the end */
  cn = get_cnode(p_s_sb) ;
  if (!cn) {
    reiserfs_panic(p_s_sb, "get_cnode failed!\n");
  }

  if (th->t_blocks_logged == th->t_blocks_allocated) {
    th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
    journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
  }
  th->t_blocks_logged++ ;
  journal->j_len++ ;

  cn->bh = bh ;
  cn->blocknr = bh->b_blocknr ;
  cn->sb = p_s_sb ;
  cn->jlist = NULL ;
  insert_journal_hash(journal->j_hash_table, cn) ;
  if (!count_already_incd) {
    get_bh(bh) ;
  }
  cn->prev = journal->j_last ;
  cn->next = NULL ;
  if (journal->j_last) {
    journal->j_last->next = cn ;
    journal->j_last = cn ;
  } else {
    journal->j_first = cn ;
    journal->j_last = cn ;
  }
  return 0 ;
}
int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  if (!current->journal_info && th->t_refcount > 1)
    reiserfs_warning (p_s_sb, "REISER-NESTING: th NULL, refcount %d",
                      th->t_refcount);

  if (!th->t_trans_id) {
    WARN_ON (1);
    return -EIO;
  }

  th->t_refcount--;
  if (th->t_refcount > 0) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info ;

    /* we aren't allowed to close a nested transaction on a different
    ** filesystem from the one in the task struct
    */
    if (cur_th->t_super != th->t_super)
      BUG() ;

    if (th != cur_th) {
      memcpy(current->journal_info, th, sizeof(*th));
      th->t_trans_id = 0;
    }
    return 0;
  } else {
    return do_journal_end(th, p_s_sb, nblocks, 0) ;
  }
}
/* removes from the current transaction, relsing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
  struct buffer_head *bh ;
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  int ret = 0 ;

  cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr) ;
  if (!cn || !cn->bh) {
    return ret ;
  }
  bh = cn->bh ;
  if (cn->prev) {
    cn->prev->next = cn->next ;
  }
  if (cn->next) {
    cn->next->prev = cn->prev ;
  }
  if (cn == journal->j_first) {
    journal->j_first = cn->next ;
  }
  if (cn == journal->j_last) {
    journal->j_last = cn->prev ;
  }
  remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, bh->b_blocknr, 0) ;
  clear_buffer_journaled (bh); /* don't log this one */

  if (!already_cleaned) {
    clear_buffer_journal_dirty (bh);
    clear_buffer_dirty(bh);
    clear_buffer_journal_test (bh);
    put_bh(bh) ;
    if (atomic_read(&(bh->b_count)) < 0) {
      reiserfs_warning (p_s_sb, "journal-1752: remove from trans, b_count < 0");
    }
    ret = 1 ;
  }
  journal->j_len-- ;
  journal->j_len_alloc-- ;
  free_cnode(p_s_sb, cn) ;
  return ret ;
}
/*
** for any cnode in a journal list, it can only be dirtied if all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn) {
  struct super_block *sb = cn->sb;
  b_blocknr_t blocknr = cn->blocknr ;
  struct reiserfs_journal_cnode *cur = cn->hprev ;
  int can_dirty = 1 ;

  /* first test hprev. These are all newer than cn, so any node here
  ** with the same block number and dev means this node can't be sent
  ** to disk right now.
  */
  while(cur && can_dirty) {
    if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
        cur->blocknr == blocknr) {
      can_dirty = 0 ;
    }
    cur = cur->hprev ;
  }
  /* then test hnext. These are all older than cn. As long as they
  ** are committed to the log, it is safe to write cn to disk
  */
  cur = cn->hnext ;
  while(cur && can_dirty) {
    if (cur->jlist && cur->jlist->j_len > 0 &&
        atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
        cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
      can_dirty = 0 ;
    }
    cur = cur->hnext ;
  }
  return can_dirty ;
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  BUG_ON (!th->t_trans_id);
  /* you can sync while nested, very, very bad */
  if (th->t_refcount > 1) {
    BUG() ;
  }
  if (journal->j_len == 0) {
    reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
    journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
  }
  return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
}
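
/* The flag combinations accepted by do_journal_end, restated as a sketch:
** plain 0 allows batching, COMMIT_NOW ends the transaction, WAIT makes the
** commit synchronous, and FLUSH_ALL pushes the real blocks out too.
** Illustrative only.
*/
#if 0
  do_journal_end(th, sb, nblocks, 0);                  /* batchable */
  do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);  /* sync commit */
  do_journal_end(th, sb, nblocks, FLUSH_ALL);          /* full flush */
#endif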
/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(void *p) {
  struct super_block *p_s_sb = p;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_list *jl;
  struct list_head *entry;

  lock_kernel();
  if (!list_empty(&journal->j_journal_list)) {
    /* last entry is the youngest, commit it and you get everything */
    entry = journal->j_journal_list.prev;
    jl = JOURNAL_LIST_ENTRY(entry);
    flush_commit_list(p_s_sb, jl, 1);
  }
  unlock_kernel();

  /*
   * this is a little racey, but there's no harm in missing
   * the filemap_fdata_write
   */
  if (!atomic_read(&journal->j_async_throttle) && !reiserfs_is_journal_aborted (journal)) {
    atomic_inc(&journal->j_async_throttle);
    filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
    atomic_dec(&journal->j_async_throttle);
  }
}
/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *p_s_sb) {
  time_t now ;
  struct reiserfs_transaction_handle th ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  now = get_seconds();
  /* safety check so we don't flush while we are replaying the log during
   * mount
   */
  if (list_empty(&journal->j_journal_list)) {
    return 0 ;
  }

  /* check the current transaction. If there are no writers, and it is
   * too old, finish it, and force the commit blocks to disk
   */
  if (atomic_read(&journal->j_wcount) <= 0 &&
      journal->j_trans_start_time > 0 &&
      journal->j_len > 0 &&
      (now - journal->j_trans_start_time) > journal->j_max_trans_age)
  {
    if (!journal_join(&th, p_s_sb, 1)) {
      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
      journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;

      /* we're only being called from kreiserfsd, it makes no sense to do
      ** an async commit so that kreiserfsd can do it later
      */
      do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT) ;
    }
  }
  return p_s_sb->s_dirt;
}
/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,
                             unsigned long nblocks, int flags) {

  time_t now ;
  int flush = flags & FLUSH_ALL ;
  int commit_now = flags & COMMIT_NOW ;
  int wait_on_commit = flags & WAIT ;
  struct reiserfs_journal_list *jl;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  BUG_ON (!th->t_trans_id);

  if (th->t_trans_id != journal->j_trans_id) {
    reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                   th->t_trans_id, journal->j_trans_id);
  }

  journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
  if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
    atomic_dec(&(journal->j_wcount)) ;
  }

  /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
  ** will be dealt with by next transaction that actually writes something, but should be taken
  ** care of in this trans
  */
  if (journal->j_len == 0) {
    BUG();
  }
  /* if wcount > 0, and we are called with flush or commit_now,
  ** we wait on j_join_wait. We will wake up when the last writer has
  ** finished the transaction, and started it on its way to the disk.
  ** Then, we flush the commit or journal list, and just return 0
  ** because the rest of journal end was already done for this transaction.
  */
  if (atomic_read(&(journal->j_wcount)) > 0) {
    if (flush || commit_now) {
      unsigned trans_id ;

      jl = journal->j_current_jl;
      trans_id = jl->j_trans_id;
      if (wait_on_commit)
        jl->j_state |= LIST_COMMIT_PENDING;
      atomic_set(&(journal->j_jlock), 1) ;
      if (flush) {
        journal->j_next_full_flush = 1 ;
      }
      unlock_journal(p_s_sb) ;

      /* sleep while the current transaction is still j_jlocked */
      while(journal->j_trans_id == trans_id) {
        if (atomic_read(&journal->j_jlock)) {
          queue_log_writer(p_s_sb);
        } else {
          lock_journal(p_s_sb);
          if (journal->j_trans_id == trans_id) {
            atomic_set(&(journal->j_jlock), 1) ;
          }
          unlock_journal(p_s_sb);
        }
      }
      if (journal->j_trans_id == trans_id) {
        BUG();
      }
      if (commit_now && journal_list_still_alive(p_s_sb, trans_id) &&
          wait_on_commit)
      {
        flush_commit_list(p_s_sb, jl, 1) ;
      }
      return 0 ;
    }
    unlock_journal(p_s_sb) ;
    return 0 ;
  }

  /* deal with old transactions where we are the last writers */
  now = get_seconds();
  if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
    commit_now = 1 ;
    journal->j_next_async_flush = 1 ;
  }
  /* don't batch when someone is waiting on j_join_wait */
  /* don't batch when syncing the commit or flushing the whole trans */
  if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now &&
      (journal->j_len < journal->j_max_batch) &&
      journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) {
    journal->j_bcount++ ;
    unlock_journal(p_s_sb) ;
    return 0 ;
  }

  if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
    reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", journal->j_start) ;
  }
  return 1 ;
}
/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn = NULL ;
  struct buffer_head *bh = NULL ;
  struct reiserfs_list_bitmap *jb = NULL ;
  int cleaned = 0 ;
  BUG_ON (!th->t_trans_id);

  cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
  if (cn && cn->bh) {
    bh = cn->bh ;
    get_bh(bh) ;
  }
  /* if it is journal new, we just remove it from this transaction */
  if (bh && buffer_journal_new(bh)) {
    clear_buffer_journal_new (bh);
    clear_prepared_bits(bh) ;
    reiserfs_clean_and_file_buffer(bh) ;
    cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
  } else {
    /* set the bit for this block in the journal bitmap for this transaction */
    jb = journal->j_current_jl->j_list_bitmap;
    if (!jb) {
      reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
    }
    set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;

    /* Note, the entire while loop is not allowed to schedule. */

    if (bh) {
      clear_prepared_bits(bh) ;
      reiserfs_clean_and_file_buffer(bh) ;
    }
    cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;

    /* find all older transactions with this block, make sure they don't try to write it out */
    cn = get_journal_hash_dev(p_s_sb,journal->j_list_hash_table, blocknr) ;
    while (cn) {
      if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
        set_bit(BLOCK_FREED, &cn->state) ;
        if (cn->bh) {
          if (!cleaned) {
            /* remove_from_transaction will brelse the buffer if it was
            ** in the current trans
            */
            clear_buffer_journal_dirty (cn->bh);
            clear_buffer_dirty(cn->bh);
            clear_buffer_journal_test(cn->bh);
            cleaned = 1 ;
            put_bh(cn->bh) ;
            if (atomic_read(&(cn->bh->b_count)) < 0) {
              reiserfs_warning (p_s_sb, "journal-2138: cn->bh->b_count < 0");
            }
          }
          if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
            atomic_dec(&(cn->jlist->j_nonzerolen)) ;
          }
          cn->bh = NULL ;
        }
      }
      cn = cn->hnext ;
    }
  }

  if (bh) {
    put_bh(bh) ; /* get_hash grabs the buffer */
    if (atomic_read(&(bh->b_count)) < 0) {
      reiserfs_warning (p_s_sb, "journal-2165: bh->b_count < 0");
    }
  }
  return 0 ;
}
void reiserfs_update_inode_transaction(struct inode *inode) {
  struct reiserfs_journal *journal = SB_JOURNAL (inode->i_sb);
  REISERFS_I(inode)->i_jl = journal->j_current_jl;
  REISERFS_I(inode)->i_trans_id = journal->j_trans_id ;
}
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
                             struct reiserfs_journal_list *jl)
{
  struct reiserfs_transaction_handle th ;
  struct super_block *sb = inode->i_sb ;
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  int ret = 0;

  /* is it from the current transaction, or from an unknown transaction? */
  if (id == journal->j_trans_id) {
    jl = journal->j_current_jl;
    /* try to let other writers come in and grow this transaction */
    let_transaction_grow(sb, id);
    if (journal->j_trans_id != id) {
      goto flush_commit_only;
    }

    ret = journal_begin(&th, sb, 1) ;
    if (ret)
      return ret;

    /* someone might have ended this transaction while we joined */
    if (journal->j_trans_id != id) {
      reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1) ;
      journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)) ;
      ret = journal_end(&th, sb, 1) ;
      goto flush_commit_only;
    }

    ret = journal_end_sync(&th, sb, 1) ;
    if (!ret)
      ret = 1;

  } else {
    /* this gets tricky, we have to make sure the journal list in
     * the inode still exists. We know the list is still around
     * if we've got a larger transaction id than the oldest list
     */
flush_commit_only:
    if (journal_list_still_alive(inode->i_sb, id)) {
      /*
       * we only set ret to 1 when we know for sure
       * the barrier hasn't been started yet on the commit
       * block.
       */
      if (atomic_read(&jl->j_commit_left) > 1)
        ret = 1;
      flush_commit_list(sb, jl, 1) ;
      if (journal->j_errno)
        ret = journal->j_errno;
    }
  }
  /* otherwise the list is gone, and long since committed */
  return ret;
}
int reiserfs_commit_for_inode(struct inode *inode) {
  unsigned long id = REISERFS_I(inode)->i_trans_id;
  struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

  /* for the whole inode, assume unset id means it was
   * changed in the current transaction. More conservative
   */
  if (!id || !jl) {
    reiserfs_update_inode_transaction(inode) ;
    id = REISERFS_I(inode)->i_trans_id;
    /* jl will be updated in __commit_trans_jl */
  }

  return __commit_trans_jl(inode, id, jl);
}
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
                                      struct buffer_head *bh) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  PROC_INFO_INC( p_s_sb, journal.restore_prepared );
  if (!bh) {
    return ;
  }
  if (test_clear_buffer_journal_restore_dirty (bh) &&
      buffer_journal_dirty(bh)) {
    struct reiserfs_journal_cnode *cn;
    cn = get_journal_hash_dev(p_s_sb,
                              journal->j_list_hash_table,
                              bh->b_blocknr);
    if (cn && can_dirty(cn)) {
      set_buffer_journal_test (bh);
      mark_buffer_dirty(bh);
    }
  }
  clear_buffer_journal_prepared (bh);
}
extern struct tree_balance *cur_tb ;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it. So, we must:
**
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
                                 struct buffer_head *bh, int wait) {
  PROC_INFO_INC( p_s_sb, journal.prepare );

  if (test_set_buffer_locked(bh)) {
    if (!wait)
      return 0;
    lock_buffer(bh);
  }
  set_buffer_journal_prepared (bh);
  if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
    clear_buffer_journal_test (bh);
    set_buffer_journal_restore_dirty (bh);
  }
  unlock_buffer(bh);
  return 1;
}
static void flush_old_journal_lists(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  struct reiserfs_journal_list *jl;
  struct list_head *entry;
  time_t now = get_seconds();

  while(!list_empty(&journal->j_journal_list)) {
    entry = journal->j_journal_list.next;
    jl = JOURNAL_LIST_ENTRY(entry);
    /* this check should always be run, to send old lists to disk */
    if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
      flush_used_journal_lists(s, jl);
    } else {
      break;
    }
  }
}
3508 ** long and ugly. If flush, will not return until all commit
3509 ** blocks and all real buffers in the trans are on disk.
3510 ** If no_async, won't return until all commit blocks are on disk.
3512 ** keep reading, there are comments as you go along
3514 ** If the journal is aborted, we just clean up. Things like flushing
3515 ** journal lists, etc just won't happen.
3517 static int do_journal_end(struct reiserfs_transaction_handle
*th
, struct super_block
* p_s_sb
, unsigned long nblocks
,
3519 struct reiserfs_journal
*journal
= SB_JOURNAL (p_s_sb
);
3520 struct reiserfs_journal_cnode
*cn
, *next
, *jl_cn
;
3521 struct reiserfs_journal_cnode
*last_cn
= NULL
;
3522 struct reiserfs_journal_desc
*desc
;
3523 struct reiserfs_journal_commit
*commit
;
3524 struct buffer_head
*c_bh
; /* commit bh */
3525 struct buffer_head
*d_bh
; /* desc bh */
3526 int cur_write_start
= 0 ; /* start index of current log write */
3529 int flush
= flags
& FLUSH_ALL
;
3530 int wait_on_commit
= flags
& WAIT
;
3531 struct reiserfs_journal_list
*jl
, *temp_jl
;
3532 struct list_head
*entry
, *safe
;
3533 unsigned long jindex
;
3534 unsigned long commit_trans_id
;
3537 BUG_ON (th
->t_refcount
> 1);
3538 BUG_ON (!th
->t_trans_id
);
3541 current
->journal_info
= th
->t_handle_save
;
3542 reiserfs_check_lock_depth(p_s_sb
, "journal end");
3543 if (journal
->j_len
== 0) {
3544 reiserfs_prepare_for_journal(p_s_sb
, SB_BUFFER_WITH_SB(p_s_sb
), 1) ;
3545 journal_mark_dirty(th
, p_s_sb
, SB_BUFFER_WITH_SB(p_s_sb
)) ;
3548 lock_journal(p_s_sb
) ;
3549 if (journal
->j_next_full_flush
) {
3550 flags
|= FLUSH_ALL
;
3553 if (journal
->j_next_async_flush
) {
3554 flags
|= COMMIT_NOW
| WAIT
;
3558 /* check_journal_end locks the journal, and unlocks if it does not return 1
3559 ** it tells us if we should continue with the journal_end, or just return
3561 if (!check_journal_end(th
, p_s_sb
, nblocks
, flags
)) {
3563 wake_queued_writers(p_s_sb
);
3564 reiserfs_async_progress_wait(p_s_sb
);
3568 /* check_journal_end might set these, check again */
3569 if (journal
->j_next_full_flush
) {
3574 ** j must wait means we have to flush the log blocks, and the real blocks for
3577 if (journal
->j_must_wait
> 0) {
3581 #ifdef REISERFS_PREALLOCATE
3582 /* quota ops might need to nest, setup the journal_info pointer for them */
3583 current
->journal_info
= th
;
3584 reiserfs_discard_all_prealloc(th
); /* it should not involve new blocks into
3585 * the transaction */
3586 current
->journal_info
= th
->t_handle_save
;
3589 /* setup description block */
3590 d_bh
= journal_getblk(p_s_sb
, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) + journal
->j_start
) ;
3591 set_buffer_uptodate(d_bh
);
3592 desc
= (struct reiserfs_journal_desc
*)(d_bh
)->b_data
;
3593 memset(d_bh
->b_data
, 0, d_bh
->b_size
) ;
3594 memcpy(get_journal_desc_magic (d_bh
), JOURNAL_DESC_MAGIC
, 8) ;
3595 set_desc_trans_id(desc
, journal
->j_trans_id
) ;
3597 /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
3598 c_bh
= journal_getblk(p_s_sb
, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
3599 ((journal
->j_start
+ journal
->j_len
+ 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb
))) ;
3600 commit
= (struct reiserfs_journal_commit
*)c_bh
->b_data
;
3601 memset(c_bh
->b_data
, 0, c_bh
->b_size
) ;
3602 set_commit_trans_id(commit
, journal
->j_trans_id
) ;
3603 set_buffer_uptodate(c_bh
) ;
3605 /* init this journal list */
3606 jl
= journal
->j_current_jl
;
3608 /* we lock the commit before doing anything because
3609 * we want to make sure nobody tries to run flush_commit_list until
3610 * the new transaction is fully setup, and we've already flushed the
3613 down(&jl
->j_commit_lock
);
3615 /* save the transaction id in case we need to commit it later */
3616 commit_trans_id
= jl
->j_trans_id
;
3618 atomic_set(&jl
->j_older_commits_done
, 0) ;
3619 jl
->j_trans_id
= journal
->j_trans_id
;
3620 jl
->j_timestamp
= journal
->j_trans_start_time
;
3621 jl
->j_commit_bh
= c_bh
;
3622 jl
->j_start
= journal
->j_start
;
3623 jl
->j_len
= journal
->j_len
;
3624 atomic_set(&jl
->j_nonzerolen
, journal
->j_len
) ;
3625 atomic_set(&jl
->j_commit_left
, journal
->j_len
+ 2);
3626 jl
->j_realblock
= NULL
;
3628 /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
3629 ** for each real block, add it to the journal list hash,
3630 ** copy into real block index array in the commit or desc block
3632 trans_half
= journal_trans_half(p_s_sb
->s_blocksize
);
3633 for (i
= 0, cn
= journal
->j_first
; cn
; cn
= cn
->next
, i
++) {
3634 if (buffer_journaled (cn
->bh
)) {
3635 jl_cn
= get_cnode(p_s_sb
) ;
3637 reiserfs_panic(p_s_sb
, "journal-1676, get_cnode returned NULL\n") ;
3640 jl
->j_realblock
= jl_cn
;
3642 jl_cn
->prev
= last_cn
;
3643 jl_cn
->next
= NULL
;
3645 last_cn
->next
= jl_cn
;
3648 /* make sure the block we are trying to log is not a block
3649 of journal or reserved area */
3651 if (is_block_in_log_or_reserved_area(p_s_sb
, cn
->bh
->b_blocknr
)) {
3652 reiserfs_panic(p_s_sb
, "journal-2332: Trying to log block %lu, which is a log block\n", cn
->bh
->b_blocknr
) ;
3654 jl_cn
->blocknr
= cn
->bh
->b_blocknr
;
3657 jl_cn
->bh
= cn
->bh
;
3659 insert_journal_hash(journal
->j_list_hash_table
, jl_cn
) ;
3660 if (i
< trans_half
) {
3661 desc
->j_realblock
[i
] = cpu_to_le32(cn
->bh
->b_blocknr
) ;
3663 commit
->j_realblock
[i
- trans_half
] = cpu_to_le32(cn
->bh
->b_blocknr
) ;
3669 set_desc_trans_len(desc
, journal
->j_len
) ;
3670 set_desc_mount_id(desc
, journal
->j_mount_id
) ;
3671 set_desc_trans_id(desc
, journal
->j_trans_id
) ;
3672 set_commit_trans_len(commit
, journal
->j_len
);
3674 /* special check in case all buffers in the journal were marked for not logging */
3675 if (journal
->j_len
== 0) {
3679 /* we're about to dirty all the log blocks, mark the description block
3680 * dirty now too. Don't mark the commit block dirty until all the
3681 * others are on disk
3683 mark_buffer_dirty(d_bh
);
3685 /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
3686 cur_write_start
= journal
->j_start
;
3687 cn
= journal
->j_first
;
3688 jindex
= 1 ; /* start at one so we don't get the desc again */
3690 clear_buffer_journal_new (cn
->bh
);
3691 /* copy all the real blocks into log area. dirty log blocks */
3692 if (buffer_journaled (cn
->bh
)) {
3693 struct buffer_head
*tmp_bh
;
3696 tmp_bh
= journal_getblk(p_s_sb
, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb
) +
3697 ((cur_write_start
+ jindex
) % SB_ONDISK_JOURNAL_SIZE(p_s_sb
))) ;
3698 set_buffer_uptodate(tmp_bh
);
3699 page
= cn
->bh
->b_page
;
3701 memcpy(tmp_bh
->b_data
, addr
+ offset_in_page(cn
->bh
->b_data
),
3704 mark_buffer_dirty(tmp_bh
);
3706 set_buffer_journal_dirty (cn
->bh
);
3707 clear_buffer_journaled (cn
->bh
);
3709 /* JDirty cleared sometime during transaction. don't log this one */
3710 reiserfs_warning(p_s_sb
, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!") ;
3714 free_cnode(p_s_sb
, cn
) ;
3719 /* we are done with both the c_bh and d_bh, but
3720 ** c_bh must be written after all other commit blocks,
3721 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
3724 journal
->j_current_jl
= alloc_journal_list(p_s_sb
);
3726 /* now it is safe to insert this transaction on the main list */
3727 list_add_tail(&jl
->j_list
, &journal
->j_journal_list
);
3728 list_add_tail(&jl
->j_working_list
, &journal
->j_working_list
);
3729 journal
->j_num_work_lists
++;
3731 /* reset journal values for the next transaction */
3732 old_start
= journal
->j_start
;
3733 journal
->j_start
= (journal
->j_start
+ journal
->j_len
+ 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb
);
3734 atomic_set(&(journal
->j_wcount
), 0) ;
3735 journal
->j_bcount
= 0 ;
3736 journal
->j_last
= NULL
;
3737 journal
->j_first
= NULL
;
3738 journal
->j_len
= 0 ;
3739 journal
->j_trans_start_time
= 0 ;
3740 journal
->j_trans_id
++ ;
3741 journal
->j_current_jl
->j_trans_id
= journal
->j_trans_id
;
3742 journal
->j_must_wait
= 0 ;
3743 journal
->j_len_alloc
= 0 ;
3744 journal
->j_next_full_flush
= 0 ;
3745 journal
->j_next_async_flush
= 0 ;
3746 init_journal_hash(p_s_sb
) ;
  // make sure reiserfs_add_jh sees the new current_jl before we
  // write out the tails
  smp_mb();
  /* tail conversion targets have to hit the disk before we end the
   * transaction.  Otherwise a later transaction might repack the tail
   * before this transaction commits, leaving the data block unflushed and
   * clean; if we crash before the later transaction commits, the data
   * block is lost.
   */
  if (!list_empty(&jl->j_tail_bh_list)) {
      unlock_kernel();
      write_ordered_buffers(&journal->j_dirty_buffers_lock,
                            journal, jl, &jl->j_tail_bh_list);
      lock_kernel();
  }
  if (!list_empty(&jl->j_tail_bh_list))
      BUG();
  up(&jl->j_commit_lock);
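  /* this releases the j_commit_lock taken earlier while this list was
  ** still the running transaction; flush_commit_list takes the same
  ** semaphore, so a committer can now proceed against this list.
  */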
  /* honor the flush wishes from the caller, simple commits can
  ** be done outside the journal lock, they are done below
  **
  ** if we don't flush the commit list right now, we put it into
  ** the work queue so the people waiting on the async progress work
  ** queue don't wait for this proc to flush journal lists and such.
  */
  if (flush) {
    flush_commit_list(p_s_sb, jl, 1) ;
    flush_journal_list(p_s_sb, jl, 1) ;
  } else if (!(jl->j_state & LIST_COMMIT_PENDING))
    queue_delayed_work(commit_wq, &journal->j_work, HZ/10);
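  /* the HZ/10 delay gives later transactions a moment to batch up before
  ** a commit_wq worker (flush_async_commits, assuming it is still the
  ** j_work handler) writes the commit blocks in the background.
  */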
  /* if the next transaction has any chance of wrapping, flush
  ** transactions that might get overwritten.  If any journal lists are very
  ** old flush them as well.
  */
first_jl:
  list_for_each_safe(entry, safe, &journal->j_journal_list) {
    temp_jl = JOURNAL_LIST_ENTRY(entry);
    if (journal->j_start <= temp_jl->j_start) {
      if ((journal->j_start + journal->j_trans_max + 1) >=
          temp_jl->j_start)
      {
        flush_used_journal_lists(p_s_sb, temp_jl);
        goto first_jl;
      } else if ((journal->j_start +
                  journal->j_trans_max + 1) <
                 SB_ONDISK_JOURNAL_SIZE(p_s_sb))
      {
        /* if we don't cross into the next transaction and we don't
         * wrap, there is no way we can overlap any later transactions
         * break now
         */
        break;
      }
    } else if ((journal->j_start +
                journal->j_trans_max + 1) >
               SB_ONDISK_JOURNAL_SIZE(p_s_sb))
    {
      if (((journal->j_start + journal->j_trans_max + 1) %
           SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= temp_jl->j_start)
      {
        flush_used_journal_lists(p_s_sb, temp_jl);
        goto first_jl;
      } else {
        /* we don't overlap anything from our start to the end of the
         * log, and our wrapped portion doesn't overlap anything at
         * the start of the log.  We can break
         */
        break;
      }
    }
  }
  flush_old_journal_lists(p_s_sb);
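  /* worked example of the wrap check above, with illustrative numbers: on
  ** an 8192 block on-disk journal with j_start = 8000 and
  ** j_trans_max = 1024, the next transaction can reach
  ** 8000 + 1024 + 1 = 9025, which wraps to block 9025 mod 8192 = 833, so
  ** every journal list whose j_start is at or below 833 could be
  ** overwritten and gets flushed first.
  */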
  journal->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, journal->j_current_jl) ;

  if (!(journal->j_current_jl->j_list_bitmap)) {
    reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
  }
  atomic_set(&(journal->j_jlock), 0) ;
  unlock_journal(p_s_sb) ;
  /* wake up anybody waiting to join. */
  clear_bit(J_WRITERS_QUEUED, &journal->j_state);
  wake_up(&(journal->j_join_wait)) ;
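  /* writers that queued behind this transaction (J_WRITERS_QUEUED is set
  ** when journal_begin callers are made to wait) sleep on j_join_wait;
  ** waking them lets them start or join the next transaction.
  */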
  if (!flush && wait_on_commit &&
      journal_list_still_alive(p_s_sb, commit_trans_id)) {
      flush_commit_list(p_s_sb, jl, 1) ;
  }
  reiserfs_check_lock_depth(p_s_sb, "journal end2");
  memset (th, 0, sizeof (*th));
  /* Re-set th->t_super, so we can properly keep track of how many
   * persistent transactions there are. We need to do this so if this
   * call is part of a failed restart_transaction, we can free it later */
  th->t_super = p_s_sb ;

  return journal->j_errno ;
}
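/* For reference, a caller of the journalling layer pairs the entry points
** described at the top of this file roughly like this (sketch only; the
** block count of 10 and the missing error handling are illustrative, not
** taken from a real caller):
**
**   struct reiserfs_transaction_handle th ;
**   journal_begin(&th, sb, 10) ;
**   journal_mark_dirty(&th, sb, bh) ;
**   journal_end(&th, sb, 10) ;
**
** do_journal_end above is the common tail that journal_end eventually
** reaches.
*/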
static void
__reiserfs_journal_abort_hard (struct super_block *sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    if (test_bit (J_ABORTED, &journal->j_state))
        return;

    printk (KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
            reiserfs_bdevname (sb));

    sb->s_flags |= MS_RDONLY;
    set_bit (J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
    dump_stack();
#endif
}
static void
__reiserfs_journal_abort_soft (struct super_block *sb, int errno)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    if (test_bit (J_ABORTED, &journal->j_state))
        return;

    if (!journal->j_errno)
        journal->j_errno = errno;

    __reiserfs_journal_abort_hard (sb);
}
void
reiserfs_journal_abort (struct super_block *sb, int errno)
{
    __reiserfs_journal_abort_soft (sb, errno);
}