/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                      -- Note, if you call this as an immediate flush from
**                      from within kupdate, it will ignore the immediate flag
*/
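/*
 * Illustrative caller sketch (not part of the original file, kept out of the
 * build): the typical begin / mark_dirty / end sequence described above.
 * The entry point names come from the comment block; the exact signatures
 * and the error handling shown here are assumptions based on the calls made
 * later in this file (see do_journal_release).
 */
#if 0
static int example_log_one_buffer(struct super_block *sb,
				  struct buffer_head *bh)
{
	struct reiserfs_transaction_handle th;
	int err;

	/* reserve room for one logged block; may join an existing transaction */
	err = journal_begin(&th, sb, 1);
	if (err)
		return err;

	reiserfs_prepare_for_journal(sb, bh, 1);
	journal_mark_dirty(&th, sb, bh);

	/* batchable end; the actual commit may happen later in the background */
	return journal_end(&th, sb, 1);
}
#endif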
#include <linux/time.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include "reiserfs.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))
/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;
#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
#define BUFNR 64		/*read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2		/* this block was freed, and can't be written.  */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */
static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb, unsigned long nblocks);
static void release_journal_dev(struct super_block *super,
				struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb,
			      unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}
/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *sb)
{
	struct reiserfs_bitmap_node *bn;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn)
		return NULL;
	bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	INIT_LIST_HEAD(&bn->list);
	return bn;
}
static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	} else {
		bn = allocate_bitmap_node(sb);
		if (!bn) {
			yield();
			goto repeat;
		}
		return bn;
	}
	return NULL;
}
static inline void free_bitmap_node(struct super_block *sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}
static void allocate_bitmap_nodes(struct super_block *sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	/* this is ok, we'll try again when more are needed */
		}
	}
}
static int set_bit_in_list_bitmap(struct super_block *sb,
				  b_blocknr_t block,
				  struct reiserfs_list_bitmap *jb)
{
	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
	unsigned int bit_nr = block % (sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}
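/*
 * Worked example for the bmap_nr/bit_nr mapping above (illustrative only,
 * assuming a 4096-byte block size): s_blocksize << 3 == 32768 bits per
 * bitmap node, so block 100000 maps to bmap_nr = 100000 / 32768 = 3 and
 * bit_nr = 100000 % 32768 = 1696, i.e. bit 1696 of the fourth on-demand
 * allocated node in jb->bitmaps[].
 */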
static void cleanup_bitmap_list(struct super_block *sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < reiserfs_bmap_count(sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}
/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}
static int free_bitmap_nodes(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}
/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
				   struct reiserfs_list_bitmap *jb_array,
				   unsigned int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vzalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(sb, "clm-2000", "unable to "
					 "allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
	}
	if (failed) {
		free_list_bitmaps(sb, jb_array);
		return -1;
	}
	return 0;
}
/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure if flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vzalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}
/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}
static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}
/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
	 ** if we crash before the transaction that freed it commits,  this transaction won't
	 ** have committed either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}
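/*
 * Illustrative caller sketch (not part of this file, kept out of the build):
 * how an allocator-side loop could honour the next_zero_bit protocol
 * documented above.  The function name and loop shape are hypothetical; only
 * reiserfs_in_journal() itself is real.
 */
#if 0
static b_blocknr_t example_find_reusable_block(struct super_block *sb,
					       unsigned int bmap_nr,
					       int bit_nr, int last_bit)
{
	b_blocknr_t next_zero_bit;

	while (bit_nr < last_bit) {
		if (!reiserfs_in_journal(sb, bmap_nr, bit_nr, 1,
					 &next_zero_bit))
			return bmap_nr * (sb->s_blocksize << 3) + bit_nr;
		/* rejected: skip ahead to the hint instead of retesting each bit */
		bit_nr = next_zero_bit ? next_zero_bit : bit_nr + 1;
	}
	return 0;
}
#endif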
/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}
/* lock the current transaction */
static inline void lock_journal(struct super_block *sb)
{
	PROC_INFO_INC(sb, journal.lock_journal);

	reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *sb)
{
	mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
}
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}
/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *sb,
					   struct reiserfs_journal_list *jl)
{
	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}
static int journal_list_still_alive(struct super_block *s,
				    unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}
/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && trylock_page(page)) {
		page_cache_get(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	} else {
		put_bh(bh);
	}
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL, "clm-2084",
				 "pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	release_buffer_page(bh);
}
static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}
static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}
static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}
#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};
static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}
static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}
/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a valid
		 * journal head from an older transaction.  If someone else sets
		 * our buffer dirty after we write it in the first loop, and
		 * then someone truncates the page away, nobody will ever write
		 * the buffer. We're safe if we write the page one last time
		 * after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}
static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned int trans_id = jl->j_trans_id;
	unsigned int other_trans_id;
	unsigned int first_trans_id;

      find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transation
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;

		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	first_trans_id = first_jl->j_trans_id;

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/* the one we just flushed is gone, this means all
				 * older lists are also gone, so first_jl is no longer
				 * valid either.  Go back to the beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}
static int reiserfs_async_progress_wait(struct super_block *s)
{
	struct reiserfs_journal *j = SB_JOURNAL(s);

	if (atomic_read(&j->j_async_throttle)) {
		int depth;

		depth = reiserfs_write_unlock_nested(s);
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
		reiserfs_write_lock_nested(s, depth);
	}

	return 0;
}
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can by written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	b_blocknr_t bn;
	struct buffer_head *tbh = NULL;
	unsigned int trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int retval = 0;
	int write_len;
	int depth;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	/* before we can put our commit blocks on disk, we have to make sure everyone older than
	 ** us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);

	if (!journal_list_still_alive(s, trans_id)) {
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;

		/*
		 * We might sleep in numerous places inside
		 * write_ordered_buffers. Relax the write lock.
		 */
		depth = reiserfs_write_unlock_nested(s);
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		reiserfs_write_lock_nested(s, depth);
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks. later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0 ; i < write_len ; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh)) {
				depth = reiserfs_write_unlock_nested(s);
				ll_rw_block(WRITE, 1, &tbh);
				reiserfs_write_lock_nested(s, depth);
			}
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);

		depth = reiserfs_write_unlock_nested(s);
		__wait_on_buffer(tbh);
		reiserfs_write_lock_nested(s, depth);
		// since we're using ll_rw_blk above, it might have skipped over
		// a locked buffer.  Double check here
		//
		/* redundant, sync_dirty_buffer() checks */
		if (buffer_dirty(tbh)) {
			depth = reiserfs_write_unlock_nested(s);
			sync_dirty_buffer(tbh);
			reiserfs_write_lock_nested(s, depth);
		}
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601",
					 "buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	/* If there was a write error in the journal - we can't commit
	 * this transaction - it will be invalid and, if successful,
	 * will just end up propagating the write error out to
	 * the file system. */
	if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
		if (buffer_dirty(jl->j_commit_bh))
			BUG();
		mark_buffer_dirty(jl->j_commit_bh) ;
		depth = reiserfs_write_unlock_nested(s);
		if (reiserfs_barrier_flush(s))
			__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(jl->j_commit_bh);
		reiserfs_write_lock_nested(s, depth);
	}

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	return retval;
}
/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}
static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);
/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(sb, "reiserfs-2201",
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(sb, last);
	}
	jl->j_realblock = NULL;
}
/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *sb,
					unsigned long offset,
					unsigned int trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int depth;

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			depth = reiserfs_write_unlock_nested(sb);
			__wait_on_buffer(journal->j_header_bh);
			reiserfs_write_lock_nested(sb, depth);
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(sb, "journal-699",
						 "buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		set_buffer_dirty(journal->j_header_bh);
		depth = reiserfs_write_unlock_nested(sb);

		if (reiserfs_barrier_flush(sb))
			__sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(journal->j_header_bh);

		reiserfs_write_lock_nested(sb, depth);
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(sb, "journal-837",
					 "IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}
static int update_journal_header_block(struct super_block *sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(sb, offset, trans_id);
}
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned int trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}
static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;
	int depth;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s, "clm-2048", "called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	} else if (mutex_trylock(&journal->j_flush_mutex)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any olders transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s, "journal-844", "journal list is flushing, "
			       "wcount is not 0");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit. Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s, "journal-813",
					 "BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer tranasction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s, "clm-2082",
					 "Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __func__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s, "journal-945",
						 "saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s, "journal-1011",
						       "cn->bh is NULL");
				}

				depth = reiserfs_write_unlock_nested(s);
				__wait_on_buffer(cn->bh);
				reiserfs_write_lock_nested(s, depth);

				if (!cn->bh) {
					reiserfs_panic(s, "journal-1012",
						       "cn->bh is NULL");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s, "journal-949",
							 "buffer write failed");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __func__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __func__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		mutex_unlock(&journal->j_flush_mutex);
	return err;
}
static int write_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}
static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned int *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned int orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_mutex held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	mutex_unlock(&journal->j_flush_mutex);
	return ret;
}
/* for o_sync and fsync heavy applications, they tend to use
** all the journa list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned int trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	get_journal_list(jl);
	get_journal_list(flush_jl);
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	put_journal_list(s, flush_jl);
	put_journal_list(s, jl);
	return 0;
}
/*
** removes any nodes in table with name block and dev as bh.
** only touchs the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}
static void free_journal_ram(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(sb, journal->j_list_bitmap);
	free_bitmap_nodes(sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(sb, journal);
	vfree(journal);
}
/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, int error)
{
	struct reiserfs_transaction_handle myth;
	int flushed = 0;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	/* we only want to flush out transactions if we were called with error == 0
	 */
	if (!error && !(sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the flush code */
		if (!journal_join(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb,
					   SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
			flushed = 1;
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb,
					   SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);

	/*
	 * We must release the write lock here because
	 * the workqueue job (flush_async_commit) needs this lock
	 */
	reiserfs_write_unlock(sb);

	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
	flush_workqueue(commit_wq);

	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(sb);

	reiserfs_write_lock(sb);

	return 0;
}
/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *sb)
{
	return do_journal_release(th, sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *sb)
{
	return do_journal_release(th, sb, 1);
}
/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}
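/*
 * Layout reminder (drawn from the replay logic below, not new behaviour):
 * a transaction occupies a contiguous, wrapping run of journal blocks
 *
 *   [ desc ][ logged block 1 ] ... [ logged block len ][ commit ]
 *
 * so for a descriptor found at `offset`, the matching commit block lives at
 * (offset + trans_len + 1) % SB_ONDISK_JOURNAL_SIZE(sb), which is exactly
 * where journal_transaction_is_valid() and journal_read_transaction() look.
 */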
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
** NOTE: only called during fs mount
*/
static int journal_transaction_is_valid(struct super_block *sb,
					struct buffer_head *d_bh,
					unsigned int *oldest_invalid_trans_id,
					unsigned long *newest_mount_id)
{
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;
	unsigned long offset;

	if (!d_bh)
		return 0;

	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	if (get_desc_trans_len(desc) > 0
	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-986: transaction "
				       "is valid returning because trans_id %d is greater than "
				       "oldest_invalid %lu",
				       get_desc_trans_id(desc),
				       *oldest_invalid_trans_id);
			return 0;
		}
		if (newest_mount_id
		    && *newest_mount_id > get_desc_mount_id(desc)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-1087: transaction "
				       "is valid returning because mount_id %d is less than "
				       "newest_mount_id %lu",
				       get_desc_mount_id(desc),
				       *newest_mount_id);
			return -1;
		}
		if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
			reiserfs_warning(sb, "journal-2018",
					 "Bad transaction length %d "
					 "encountered, ignoring transaction",
					 get_desc_trans_len(desc));
			return -1;
		}
		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);

		/* ok, we have a journal description block, lets see if the transaction was valid */
		c_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  ((offset + get_desc_trans_len(desc) +
				    1) % SB_ONDISK_JOURNAL_SIZE(sb)));
		if (!c_bh)
			return 0;
		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
		if (journal_compare_desc_commit(sb, desc, commit)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal_transaction_is_valid, commit offset %ld had bad "
				       "time %d or length %d",
				       c_bh->b_blocknr -
				       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
				       get_commit_trans_id(commit),
				       get_commit_trans_len(commit));
			brelse(c_bh);
			if (oldest_invalid_trans_id) {
				*oldest_invalid_trans_id =
				    get_desc_trans_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1004: "
					       "transaction_is_valid setting oldest invalid trans_id "
					       "to %d",
					       get_desc_trans_id(desc));
			}
			return -1;
		}
		brelse(c_bh);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1006: found valid "
			       "transaction start offset %llu, len %d id %d",
			       d_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_desc_trans_len(desc),
			       get_desc_trans_id(desc));
		return 1;
	} else {
		return 0;
	}
}
static void brelse_array(struct buffer_head **heads, int num)
{
	int i;
	for (i = 0; i < num; i++) {
		brelse(heads[i]);
	}
}
2044 ** given the start, and values for the oldest acceptable transactions,
2045 ** this either reads in a replays a transaction, or returns because the
2046 ** transaction is invalid, or too old.
2047 ** NOTE: only called during fs mount
2049 static int journal_read_transaction(struct super_block
*sb
,
2050 unsigned long cur_dblock
,
2051 unsigned long oldest_start
,
2052 unsigned int oldest_trans_id
,
2053 unsigned long newest_mount_id
)
2055 struct reiserfs_journal
*journal
= SB_JOURNAL(sb
);
2056 struct reiserfs_journal_desc
*desc
;
2057 struct reiserfs_journal_commit
*commit
;
2058 unsigned int trans_id
= 0;
2059 struct buffer_head
*c_bh
;
2060 struct buffer_head
*d_bh
;
2061 struct buffer_head
**log_blocks
= NULL
;
2062 struct buffer_head
**real_blocks
= NULL
;
2063 unsigned int trans_offset
;
2067 d_bh
= journal_bread(sb
, cur_dblock
);
2070 desc
= (struct reiserfs_journal_desc
*)d_bh
->b_data
;
2071 trans_offset
= d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(sb
);
2072 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
, "journal-1037: "
2073 "journal_read_transaction, offset %llu, len %d mount_id %d",
2074 d_bh
->b_blocknr
- SB_ONDISK_JOURNAL_1st_BLOCK(sb
),
2075 get_desc_trans_len(desc
), get_desc_mount_id(desc
));
2076 if (get_desc_trans_id(desc
) < oldest_trans_id
) {
2077 reiserfs_debug(sb
, REISERFS_DEBUG_CODE
, "journal-1039: "
2078 "journal_read_trans skipping because %lu is too old",
2080 SB_ONDISK_JOURNAL_1st_BLOCK(sb
));
2084 if (get_desc_mount_id(desc
) != newest_mount_id
) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
			       "journal_read_trans skipping because %d is != "
			       "newest_mount_id %lu", get_desc_mount_id(desc),
			       newest_mount_id);
		return 1;
	}
	c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     ((trans_offset + get_desc_trans_len(desc) + 1) %
			      SB_ONDISK_JOURNAL_SIZE(sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	if (journal_compare_desc_commit(sb, desc, commit)) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal_read_transaction, "
			       "commit offset %llu had bad time %d or length %d",
			       c_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_commit_trans_id(commit),
			       get_commit_trans_len(commit));
		brelse(c_bh);
		brelse(d_bh);
		return 1;
	}

	if (bdev_read_only(sb->s_bdev)) {
		reiserfs_warning(sb, "clm-2076",
				 "device is readonly, unable to replay log");
		brelse(c_bh);
		brelse(d_bh);
		return -EROFS;
	}

	trans_id = get_desc_trans_id(desc);
	/* now we know we've got a good transaction, and it was inside the valid time ranges */
	log_blocks = kmalloc(get_desc_trans_len(desc) *
			     sizeof(struct buffer_head *), GFP_NOFS);
	real_blocks = kmalloc(get_desc_trans_len(desc) *
			      sizeof(struct buffer_head *), GFP_NOFS);
	if (!log_blocks || !real_blocks) {
		reiserfs_warning(sb, "journal-1169",
				 "kmalloc failed, unable to mount FS");
		return -1;
	}
	/* get all the buffer heads */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		log_blocks[i] =
		    journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				   (trans_offset + 1 +
				    i) % SB_ONDISK_JOURNAL_SIZE(sb));
		if (i < trans_half) {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(desc->j_realblock[i]));
		} else {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(commit->
						  j_realblock[i - trans_half]));
		}
		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
			reiserfs_warning(sb, "journal-1207",
					 "REPLAY FAILURE fsck required! "
					 "Block to replay is outside of "
					 "filesystem");
			goto abort_replay;
		}
		/* make sure we don't try to replay onto log or reserved area */
		if (is_block_in_log_or_reserved_area
		    (sb, real_blocks[i]->b_blocknr)) {
			reiserfs_warning(sb, "journal-1204",
					 "REPLAY FAILURE fsck required! "
					 "Trying to replay onto a log block");
		      abort_replay:
			brelse_array(log_blocks, i);
			brelse_array(real_blocks, i);
			return -1;
		}
	}
	/* read in the log blocks, memcpy to the corresponding real block */
	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(log_blocks[i]);
		if (!buffer_uptodate(log_blocks[i])) {
			reiserfs_warning(sb, "journal-1212",
					 "REPLAY FAILURE fsck required! "
					 "buffer write failed");
			brelse_array(log_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse_array(real_blocks, get_desc_trans_len(desc));
			return -1;
		}
		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
		       real_blocks[i]->b_size);
		set_buffer_uptodate(real_blocks[i]);
		brelse(log_blocks[i]);
	}
	/* flush out the real blocks */
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		set_buffer_dirty(real_blocks[i]);
		write_dirty_buffer(real_blocks[i], WRITE);
	}
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(real_blocks[i]);
		if (!buffer_uptodate(real_blocks[i])) {
			reiserfs_warning(sb, "journal-1226",
					 "REPLAY FAILURE, fsck required! "
					 "buffer write failed");
			brelse_array(real_blocks + i,
				     get_desc_trans_len(desc) - i);
			return -1;
		}
		brelse(real_blocks[i]);
	}
	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((trans_offset + get_desc_trans_len(desc) +
	      2) % SB_ONDISK_JOURNAL_SIZE(sb));
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "journal-1095: setting journal "
		       "start to offset %ld",
		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));

	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	journal->j_last_flush_trans_id = trans_id;
	journal->j_trans_id = trans_id + 1;
	/* check for trans_id overflow */
	if (journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	brelse(c_bh);
	brelse(d_bh);
	kfree(log_blocks);
	kfree(real_blocks);
	return 0;
}
/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
					   b_blocknr_t block, int bufsize,
					   b_blocknr_t max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return (bh);

	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	ll_rw_block(READ, j, bhlist);
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will
** be to an invalid transaction.  This tests that before finding all the
** transactions in the log, which makes normal mount times fast.
** After a crash, this starts with the next unflushed transaction, and
** replays until it finds one too old, or invalid.
** On exit, it sets things up so the first transaction will work correctly.
** NOTE: only called during fs mount
*/
static int journal_read(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	unsigned int oldest_trans_id = 0;
	unsigned int oldest_invalid_trans_id = 0;
	time_t start;
	unsigned long oldest_start = 0;
	unsigned long cur_dblock = 0;
	unsigned long newest_mount_id = 9;
	struct buffer_head *d_bh;
	struct reiserfs_journal_header *jh;
	int valid_journal_header = 0;
	int replay_count = 0;
	int continue_replay = 1;
	int ret;
	char b[BDEVNAME_SIZE];

	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_info(sb, "checking transaction log (%s)\n",
		      bdevname(journal->j_dev_bd, b));
	start = get_seconds();

	/* step 1, read in the journal header block.  Check the transaction it says
	 ** is the first unflushed, and if that transaction is not valid,
	 ** replay is done
	 */
	journal->j_header_bh = journal_bread(sb,
					     SB_ONDISK_JOURNAL_1st_BLOCK(sb)
					     + SB_ONDISK_JOURNAL_SIZE(sb));
	if (!journal->j_header_bh) {
		return 1;
	}
	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
	if (le32_to_cpu(jh->j_first_unflushed_offset) <
	    SB_ONDISK_JOURNAL_SIZE(sb)
	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
		oldest_start =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		    le32_to_cpu(jh->j_first_unflushed_offset);
		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		newest_mount_id = le32_to_cpu(jh->j_mount_id);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1153: found in "
			       "header: first_unflushed_offset %d, last_flushed_trans_id "
			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
			       le32_to_cpu(jh->j_last_flush_trans_id));
		valid_journal_header = 1;

		/* now, we try to read the first unflushed offset.  If it is not valid,
		 ** there is nothing more we can do, and it makes no sense to read
		 ** through the whole log.
		 */
		d_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  le32_to_cpu(jh->j_first_unflushed_offset));
		ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
		if (!ret) {
			continue_replay = 0;
		}
		brelse(d_bh);
		goto start_log_replay;
	}

	/* ok, there are transactions that need to be replayed.  start with the first log block, find
	 ** all the valid transactions, and pick out the oldest.
	 */
	while (continue_replay
	       && cur_dblock <
	       (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
		SB_ONDISK_JOURNAL_SIZE(sb))) {
		/* Note that it is required for blocksize of primary fs device and journal
		   device to be the same */
		d_bh =
		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
				    sb->s_blocksize,
				    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				    SB_ONDISK_JOURNAL_SIZE(sb));
		ret =
		    journal_transaction_is_valid(sb, d_bh,
						 &oldest_invalid_trans_id,
						 &newest_mount_id);
		if (ret == 1) {
			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
			if (oldest_start == 0) {	/* init all oldest_ values */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1179: Setting "
					       "oldest_start to offset %llu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
				/* one we just read was older */
				oldest_trans_id = get_desc_trans_id(desc);
				oldest_start = d_bh->b_blocknr;
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1180: Resetting "
					       "oldest_start to offset %lu, trans_id %lu",
					       oldest_start -
					       SB_ONDISK_JOURNAL_1st_BLOCK
					       (sb), oldest_trans_id);
			}
			if (newest_mount_id < get_desc_mount_id(desc)) {
				newest_mount_id = get_desc_mount_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1299: Setting "
					       "newest_mount_id to %d",
					       get_desc_mount_id(desc));
			}
			cur_dblock += get_desc_trans_len(desc) + 2;
		} else {
			cur_dblock++;
		}
		brelse(d_bh);
	}

      start_log_replay:
	cur_dblock = oldest_start;
	if (oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1206: Starting replay "
			       "from offset %llu, trans_id %lu",
			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       oldest_trans_id);
	}
	replay_count = 0;
	while (continue_replay && oldest_trans_id > 0) {
		ret =
		    journal_read_transaction(sb, cur_dblock, oldest_start,
					     oldest_trans_id, newest_mount_id);
		if (ret < 0) {
			return ret;
		} else if (ret != 0) {
			break;
		}
		cur_dblock =
		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
		replay_count++;
		if (cur_dblock == oldest_start)
			break;
	}

	if (oldest_trans_id == 0) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1225: No valid " "transactions found");
	}
	/* j_start does not get set correctly if we don't replay any transactions.
	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
	 ** copy the trans_id from the header
	 */
	if (valid_journal_header && replay_count == 0) {
		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
		journal->j_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
		/* check for trans_id overflow */
		if (journal->j_trans_id == 0)
			journal->j_trans_id = 10;
		journal->j_last_flush_trans_id =
		    le32_to_cpu(jh->j_last_flush_trans_id);
		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
	} else {
		journal->j_mount_id = newest_mount_id + 1;
	}
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
		       "newest_mount_id to %lu", journal->j_mount_id);
	journal->j_first_unflushed_offset = journal->j_start;
	if (replay_count > 0) {
		reiserfs_info(sb,
			      "replayed %d transactions in %lu seconds\n",
			      replay_count, get_seconds() - start);
	}
	/* needed to satisfy the locking in _update_journal_header_block */
	reiserfs_write_lock(sb);
	if (!bdev_read_only(sb->s_bdev) &&
	    _update_journal_header_block(sb, journal->j_start,
					 journal->j_last_flush_trans_id)) {
		reiserfs_write_unlock(sb);
		/* replay failed, caller must call free_journal_ram and abort
		 ** the mount
		 */
		return -1;
	}
	reiserfs_write_unlock(sb);
	return 0;
}
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
	struct reiserfs_journal_list *jl;
	jl = kzalloc(sizeof(struct reiserfs_journal_list),
		     GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&jl->j_list);
	INIT_LIST_HEAD(&jl->j_working_list);
	INIT_LIST_HEAD(&jl->j_tail_bh_list);
	INIT_LIST_HEAD(&jl->j_bh_list);
	mutex_init(&jl->j_commit_mutex);
	SB_JOURNAL(s)->j_num_lists++;
	get_journal_list(jl);
	return jl;
}
static void journal_list_init(struct super_block *sb)
{
	SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}
static void release_journal_dev(struct super_block *super,
				struct reiserfs_journal *journal)
{
	if (journal->j_dev_bd != NULL) {
		blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
		journal->j_dev_bd = NULL;
	}
}
static int journal_init_dev(struct super_block *super,
			    struct reiserfs_journal *journal,
			    const char *jdev_name)
{
	int result;
	dev_t jdev;
	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	char b[BDEVNAME_SIZE];

	result = 0;

	journal->j_dev_bd = NULL;
	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

	if (bdev_read_only(super->s_bdev))
		blkdev_mode = FMODE_READ;

	/* there is no "jdev" option and journal is on separate device */
	if ((!jdev_name || !jdev_name[0])) {
		if (jdev == super->s_dev)
			blkdev_mode &= ~FMODE_EXCL;
		journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
						      journal);
		journal->j_dev_mode = blkdev_mode;
		if (IS_ERR(journal->j_dev_bd)) {
			result = PTR_ERR(journal->j_dev_bd);
			journal->j_dev_bd = NULL;
			reiserfs_warning(super, "sh-458",
					 "cannot init journal device '%s': %i",
					 __bdevname(jdev, b), result);
			return result;
		} else if (jdev != super->s_dev)
			set_blocksize(journal->j_dev_bd, super->s_blocksize);

		return 0;
	}

	journal->j_dev_mode = blkdev_mode;
	journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
	if (IS_ERR(journal->j_dev_bd)) {
		result = PTR_ERR(journal->j_dev_bd);
		journal->j_dev_bd = NULL;
		reiserfs_warning(super,
				 "journal_init_dev: Cannot open '%s': %i",
				 jdev_name, result);
		return result;
	}

	set_blocksize(journal->j_dev_bd, super->s_blocksize);
	reiserfs_info(super,
		      "journal_init_dev: journal device: %s\n",
		      bdevname(journal->j_dev_bd, b));
	return 0;
}
/*
 * When creating/tuning a file system user can assign some
 * journal params within boundaries which depend on the ratio
 * blocksize/standard_blocksize.
 *
 * For blocks >= standard_blocksize transaction size should
 * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more
 * than JOURNAL_TRANS_MAX_DEFAULT.
 *
 * For blocks < standard_blocksize these boundaries should be
 * decreased proportionally.
 */
#define REISERFS_STANDARD_BLKSIZE (4096)
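/*
 * Illustrative sketch (not part of the driver): a worked example of the
 * scaling described above.  For a hypothetical 1KB blocksize the ratio is
 * 4096/1024 = 4, so the advised transaction-size bounds shrink to
 * JOURNAL_TRANS_MIN_DEFAULT/4 .. JOURNAL_TRANS_MAX_DEFAULT/4, and
 * j_max_batch is expected to track j_trans_max * JOURNAL_MAX_BATCH_DEFAULT /
 * JOURNAL_TRANS_MAX_DEFAULT, mirroring the checks in
 * check_advise_trans_params() below.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_scaled_trans_bounds(struct super_block *sb)
{
	int ratio = 1;

	if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
		ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

	printk(KERN_INFO "trans_max must lie in [%d, %d], batch %d\n",
	       JOURNAL_TRANS_MIN_DEFAULT / ratio,
	       JOURNAL_TRANS_MAX_DEFAULT / ratio,
	       JOURNAL_TRANS_MAX_DEFAULT / ratio *
	       JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT);
}
#endif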
static int check_advise_trans_params(struct super_block *sb,
				     struct reiserfs_journal *journal)
{
	if (journal->j_trans_max) {
		/* Non-default journal params.
		   Do sanity check for them. */
		int ratio = 1;
		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
			ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
		    journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
		    SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
		    JOURNAL_MIN_RATIO) {
			reiserfs_warning(sb, "sh-462",
					 "bad transaction max size (%u). "
					 "FSCK?", journal->j_trans_max);
			return 1;
		}
		if (journal->j_max_batch != (journal->j_trans_max) *
		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT) {
			reiserfs_warning(sb, "sh-463",
					 "bad transaction max batch (%u). "
					 "FSCK?", journal->j_max_batch);
			return 1;
		}
	} else {
		/* Default journal params.
		   The file system was created by old version
		   of mkreiserfs, so some fields contain zeros,
		   and we need to advise proper values for them */
		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
					 sb->s_blocksize);
			return 1;
		}
		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
	}
	return 0;
}
/*
** must be called once on fs mount.  calls journal_read for you
*/
int journal_init(struct super_block *sb, const char *j_dev_name,
		 int old_format, unsigned int commit_max_age)
{
	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
	struct buffer_head *bhjh;
	struct reiserfs_super_block *rs;
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal;
	struct reiserfs_journal_list *jl;
	char b[BDEVNAME_SIZE];
	int ret;

	journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
	if (!journal) {
		reiserfs_warning(sb, "journal-1256",
				 "unable to get memory for journal structure");
		return 1;
	}
	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
	INIT_LIST_HEAD(&journal->j_prealloc_list);
	INIT_LIST_HEAD(&journal->j_working_list);
	INIT_LIST_HEAD(&journal->j_journal_list);
	journal->j_persistent_trans = 0;
	if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
					   reiserfs_bmap_count(sb)))
		goto free_and_return;

	allocate_bitmap_nodes(sb);

	/* reserved for journal area support */
	SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
					     REISERFS_OLD_DISK_OFFSET_IN_BYTES
					     / sb->s_blocksize +
					     reiserfs_bmap_count(sb) +
					     1 :
					     REISERFS_DISK_OFFSET_IN_BYTES /
					     sb->s_blocksize + 2);

	/* Sanity check to see is the standard journal fitting within first bitmap
	   (actual for small blocksizes) */
	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
	    (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
	     SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
		reiserfs_warning(sb, "journal-1393",
				 "journal does not fit for area addressed "
				 "by first of bitmap blocks. It starts at "
				 "%u and its size is %u. Block size %ld",
				 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
				 SB_ONDISK_JOURNAL_SIZE(sb),
				 sb->s_blocksize);
		goto free_and_return;
	}

	if (journal_init_dev(sb, journal, j_dev_name) != 0) {
		reiserfs_warning(sb, "sh-462",
				 "unable to initialize journal device");
		goto free_and_return;
	}

	rs = SB_DISK_SUPER_BLOCK(sb);

	/* read journal header */
	bhjh = journal_bread(sb,
			     SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     SB_ONDISK_JOURNAL_SIZE(sb));
	if (!bhjh) {
		reiserfs_warning(sb, "sh-459",
				 "unable to read journal header");
		goto free_and_return;
	}
	jh = (struct reiserfs_journal_header *)(bhjh->b_data);

	/* make sure that journal matches to the super block */
	if (is_reiserfs_jr(rs)
	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
		sb_jp_journal_magic(rs))) {
		reiserfs_warning(sb, "sh-460",
				 "journal header magic %x (device %s) does "
				 "not match to magic found in super block %x",
				 jh->jh_journal.jp_journal_magic,
				 bdevname(journal->j_dev_bd, b),
				 sb_jp_journal_magic(rs));
		brelse(bhjh);
		goto free_and_return;
	}

	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
	journal->j_max_commit_age =
	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;

	if (check_advise_trans_params(sb, journal) != 0)
		goto free_and_return;
	journal->j_default_max_commit_age = journal->j_max_commit_age;

	if (commit_max_age != 0) {
		journal->j_max_commit_age = commit_max_age;
		journal->j_max_trans_age = commit_max_age;
	}

	reiserfs_info(sb, "journal params: device %s, size %u, "
		      "journal first block %u, max trans len %u, max batch %u, "
		      "max commit age %u, max trans age %u\n",
		      bdevname(journal->j_dev_bd, b),
		      SB_ONDISK_JOURNAL_SIZE(sb),
		      SB_ONDISK_JOURNAL_1st_BLOCK(sb),
		      journal->j_trans_max,
		      journal->j_max_batch,
		      journal->j_max_commit_age, journal->j_max_trans_age);

	brelse(bhjh);

	journal->j_list_bitmap_index = 0;
	journal_list_init(sb);

	memset(journal->j_list_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));

	INIT_LIST_HEAD(&journal->j_dirty_buffers);
	spin_lock_init(&journal->j_dirty_buffers_lock);

	journal->j_start = 0;
	journal->j_len = 0;
	journal->j_len_alloc = 0;
	atomic_set(&(journal->j_wcount), 0);
	atomic_set(&(journal->j_async_throttle), 0);
	journal->j_bcount = 0;
	journal->j_trans_start_time = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	init_waitqueue_head(&(journal->j_join_wait));
	mutex_init(&journal->j_mutex);
	mutex_init(&journal->j_flush_mutex);

	journal->j_trans_id = 10;
	journal->j_mount_id = 10;
	journal->j_state = 0;
	atomic_set(&(journal->j_jlock), 0);
	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
	journal->j_cnode_free_orig = journal->j_cnode_free_list;
	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
	journal->j_cnode_used = 0;
	journal->j_must_wait = 0;

	if (journal->j_cnode_free == 0) {
		reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
				 "allocation failed (%ld bytes). Journal is "
				 "too large for available memory. Usually "
				 "this is due to a journal that is too large.",
				 sizeof(struct reiserfs_journal_cnode) * num_cnodes);
		goto free_and_return;
	}

	init_journal_hash(sb);
	jl = journal->j_current_jl;

	/*
	 * get_list_bitmap() may call flush_commit_list() which
	 * requires the lock. Calling flush_commit_list() shouldn't happen
	 * this early but I like to be paranoid.
	 */
	reiserfs_write_lock(sb);
	jl->j_list_bitmap = get_list_bitmap(sb, jl);
	reiserfs_write_unlock(sb);
	if (!jl->j_list_bitmap) {
		reiserfs_warning(sb, "journal-2005",
				 "get_list_bitmap failed for journal list 0");
		goto free_and_return;
	}

	ret = journal_read(sb);
	if (ret < 0) {
		reiserfs_warning(sb, "reiserfs-2006",
				 "Replay Failure, unable to mount");
		goto free_and_return;
	}

	reiserfs_mounted_fs_count++;
	if (reiserfs_mounted_fs_count <= 1)
		commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);

	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
	journal->j_work_sb = sb;
	return 0;
      free_and_return:
	free_journal_ram(sb);
	return 1;
}
/*
** test for a polite end of the current transaction.  Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a single
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
				   int new_alloc)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	time_t now = get_seconds();
	/* cannot restart while nested */
	BUG_ON(!th->t_trans_id);
	if (th->t_refcount > 1)
		return 0;
	if (journal->j_must_wait > 0 ||
	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
	    atomic_read(&(journal->j_jlock)) ||
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
		return 1;
	}
	journal->j_len_alloc += new_alloc;
	th->t_blocks_allocated += new_alloc;
	return 0;
}
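
/*
 * Illustrative sketch (not part of the driver): the calling pattern a
 * long-running writer is expected to follow.  When
 * journal_transaction_should_end() reports that the transaction is getting
 * old or full, the caller politely ends it and opens a fresh one before
 * logging more blocks.  The helper name and block count are hypothetical,
 * and error handling is abbreviated.
 */
#if 0	/* example only, never compiled */
static int example_restart_if_needed(struct reiserfs_transaction_handle *th,
				     struct super_block *sb, int new_alloc)
{
	int err;

	if (!journal_transaction_should_end(th, new_alloc))
		return 0;	/* still room, keep going */

	err = journal_end(th, sb, th->t_blocks_allocated);
	if (err)
		return err;
	return journal_begin(th, sb, new_alloc);
}
#endif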
/* this must be called inside a transaction
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
	BUG_ON(!th->t_trans_id);
	journal->j_must_wait = 1;
	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
}
/* this must be called without a transaction started
*/
void reiserfs_allow_writes(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
	wake_up(&journal->j_join_wait);
}
/* this must be called without a transaction started
*/
void reiserfs_wait_on_write_block(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	wait_event(journal->j_join_wait,
		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
}
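
/*
 * Illustrative sketch (not part of the driver): how the three helpers above
 * pair up.  An operation that must not race with new writers blocks them
 * from inside its transaction; unrelated writers park in
 * reiserfs_wait_on_write_block() until reiserfs_allow_writes() clears the
 * J_WRITERS_BLOCKED bit.  The outer helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_block_then_allow(struct reiserfs_transaction_handle *th,
				     struct super_block *s)
{
	reiserfs_block_writes(th);	/* called inside the transaction */
	/* ... do the work that must not race with new writers ... */
	reiserfs_allow_writes(s);	/* after the transaction has ended */
}
#endif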
static void queue_log_writer(struct super_block *s)
{
	wait_queue_t wait;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	set_bit(J_WRITERS_QUEUED, &journal->j_state);

	/*
	 * we don't want to use wait_event here because
	 * we only want to wait once.
	 */
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&journal->j_join_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
		int depth = reiserfs_write_unlock_nested(s);
		schedule();
		reiserfs_write_lock_nested(s, depth);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&journal->j_join_wait, &wait);
}
static void wake_queued_writers(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
		wake_up(&journal->j_join_wait);
}
static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned long bcount = journal->j_bcount;
	while (1) {
		int depth;

		depth = reiserfs_write_unlock_nested(sb);
		schedule_timeout_uninterruptible(1);
		reiserfs_write_lock_nested(sb, depth);

		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
		while ((atomic_read(&journal->j_wcount) > 0 ||
			atomic_read(&journal->j_jlock)) &&
		       journal->j_trans_id == trans_id) {
			queue_log_writer(sb);
		}
		if (journal->j_trans_id != trans_id)
			break;
		if (bcount == journal->j_bcount)
			break;
		bcount = journal->j_bcount;
	}
}
/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable.  send the number of blocks you
** expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, unsigned long nblocks,
			      int join)
{
	time_t now = get_seconds();
	unsigned int old_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_transaction_handle myth;
	int sched_count = 0;
	int retval;
	int depth;

	reiserfs_check_lock_depth(sb, "journal_begin");
	BUG_ON(nblocks > journal->j_trans_max);

	PROC_INFO_INC(sb, journal.journal_being);
	/* set here for journal_join */
	th->t_refcount = 1;
	th->t_super = sb;

      relock:
	lock_journal(sb);
	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
		unlock_journal(sb);
		retval = journal->j_errno;
		goto out_fail;
	}
	journal->j_bcount++;

	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
		unlock_journal(sb);
		depth = reiserfs_write_unlock_nested(sb);
		reiserfs_wait_on_write_block(sb);
		reiserfs_write_lock_nested(sb, depth);
		PROC_INFO_INC(sb, journal.journal_relock_writers);
		goto relock;
	}
	now = get_seconds();

	/* if there is no room in the journal OR
	 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
	 ** we don't sleep if there aren't other writers
	 */

	if ((!join && journal->j_must_wait > 0) ||
	    (!join
	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
	    || (!join && atomic_read(&journal->j_wcount) > 0
		&& journal->j_trans_start_time > 0
		&& (now - journal->j_trans_start_time) >
		journal->j_max_trans_age) || (!join
					      && atomic_read(&journal->j_jlock))
	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

		old_trans_id = journal->j_trans_id;
		unlock_journal(sb);	/* allow others to finish this transaction */

		if (!join && (journal->j_len_alloc + nblocks + 2) >=
		    journal->j_max_batch &&
		    ((journal->j_len + nblocks + 2) * 100) <
		    (journal->j_len_alloc * 75)) {
			if (atomic_read(&journal->j_wcount) > 10) {
				sched_count++;
				queue_log_writer(sb);
				goto relock;
			}
		}
		/* don't mess with joining the transaction if all we have to do is
		 * wait for someone else to do a commit
		 */
		if (atomic_read(&journal->j_jlock)) {
			while (journal->j_trans_id == old_trans_id &&
			       atomic_read(&journal->j_jlock)) {
				queue_log_writer(sb);
			}
			goto relock;
		}
		retval = journal_join(&myth, sb, 1);
		if (retval)
			goto out_fail;

		/* someone might have ended the transaction while we joined */
		if (old_trans_id != journal->j_trans_id) {
			retval = do_journal_end(&myth, sb, 1, 0);
		} else {
			retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
		}

		if (retval)
			goto out_fail;

		PROC_INFO_INC(sb, journal.journal_relock_wcount);
		goto relock;
	}
	/* we are the first writer, set trans_id */
	if (journal->j_trans_start_time == 0) {
		journal->j_trans_start_time = get_seconds();
	}
	atomic_inc(&(journal->j_wcount));
	journal->j_len_alloc += nblocks;
	th->t_blocks_logged = 0;
	th->t_blocks_allocated = nblocks;
	th->t_trans_id = journal->j_trans_id;
	unlock_journal(sb);
	INIT_LIST_HEAD(&th->t_list);
	return 0;

      out_fail:
	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;
	return retval;
}
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
								    super_block
								    *s,
								    int nblocks)
{
	int ret;
	struct reiserfs_transaction_handle *th;

	/* if we're nesting into an existing transaction.  It will be
	 ** persistent on its own
	 */
	if (reiserfs_transaction_running(s)) {
		th = current->journal_info;
		th->t_refcount++;
		BUG_ON(th->t_refcount < 2);

		return th;
	}
	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
	if (!th)
		return NULL;
	ret = journal_begin(th, s, nblocks);
	if (ret) {
		kfree(th);
		return NULL;
	}

	SB_JOURNAL(s)->j_persistent_trans++;
	return th;
}
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	int ret = 0;

	if (th->t_trans_id)
		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
	else
		ret = -EIO;

	if (th->t_refcount == 0) {
		SB_JOURNAL(s)->j_persistent_trans--;
		kfree(th);
	}
	return ret;
}
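
/*
 * Illustrative sketch (not part of the driver): typical use of the
 * persistent-transaction pair above.  The handle is heap allocated by
 * reiserfs_persistent_transaction() so it can outlive the caller's stack
 * frame, and must always be balanced with
 * reiserfs_end_persistent_transaction().  The block count and helper name
 * are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_persistent_update(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(s, 1);
	if (!th)
		return -ENOMEM;

	/* ... journal_mark_dirty() the buffers being changed ... */

	return reiserfs_end_persistent_transaction(th);
}
#endif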
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
}
int journal_join_abort(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
}
int journal_begin(struct reiserfs_transaction_handle *th,
		  struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;
	int ret;

	th->t_handle_save = NULL;
	if (cur_th) {
		/* we are nesting into the current transaction */
		if (cur_th->t_super == sb) {
			BUG_ON(!cur_th->t_refcount);
			cur_th->t_refcount++;
			memcpy(th, cur_th, sizeof(*th));
			if (th->t_refcount <= 1)
				reiserfs_warning(sb, "reiserfs-2005",
						 "BAD: refcount <= 1, but "
						 "journal_info != 0");
			return 0;
		} else {
			/* we've ended up with a handle from a different filesystem.
			 ** save it and restore on journal_end.  This should never
			 ** really happen...
			 */
			reiserfs_warning(sb, "clm-2100",
					 "nesting info a different FS");
			th->t_handle_save = current->journal_info;
			current->journal_info = th;
		}
	} else {
		current->journal_info = th;
	}
	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
	BUG_ON(current->journal_info != th);

	/* I guess this boils down to being the reciprocal of clm-2100 above.
	 * If do_journal_begin_r fails, we need to put it back, since journal_end
	 * won't be called to do it. */
	if (ret)
		current->journal_info = th->t_handle_save;
	else
		BUG_ON(!th->t_refcount);

	return ret;
}
/*
** puts bh into the current transaction.  If it was already there, reorders removes the
** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
**
** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
** transaction is committed.
**
** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	int count_already_incd = 0;
	int prepared = 0;
	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(sb, journal.mark_dirty);
	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	prepared = test_clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
	/* already in this transaction, we are done */
	if (buffer_journaled(bh)) {
		PROC_INFO_INC(sb, journal.mark_dirty_already);
		return 0;
	}

	/* this must be turned into a panic instead of a warning.  We can't allow
	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
	 ** could get to disk too early.  NOT GOOD.
	 */
	if (!prepared || buffer_dirty(bh)) {
		reiserfs_warning(sb, "journal-1777",
				 "buffer %llu bad state "
				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
				 (unsigned long long)bh->b_blocknr,
				 prepared ? ' ' : '!',
				 buffer_locked(bh) ? ' ' : '!',
				 buffer_dirty(bh) ? ' ' : '!',
				 buffer_journal_dirty(bh) ? ' ' : '!');
	}

	if (atomic_read(&(journal->j_wcount)) <= 0) {
		reiserfs_warning(sb, "journal-1409",
				 "returning because j_wcount was %d",
				 atomic_read(&(journal->j_wcount)));
		return 1;
	}
	/* this error means I've screwed up, and we've overflowed the transaction.
	 ** Nothing can be done here, except make the FS readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
		reiserfs_panic(th->t_super, "journal-1413",
			       "j_len (%lu) is too big",
			       journal->j_len);
	}

	if (buffer_journal_dirty(bh)) {
		count_already_incd = 1;
		PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
		clear_buffer_journal_dirty(bh);
	}

	if (journal->j_len > journal->j_len_alloc) {
		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
	}

	set_buffer_journaled(bh);

	/* now put this guy on the end */
	if (!cn) {
		cn = get_cnode(sb);
		if (!cn) {
			reiserfs_panic(sb, "journal-4", "get_cnode failed!");
		}

		if (th->t_blocks_logged == th->t_blocks_allocated) {
			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
		}
		th->t_blocks_logged++;
		journal->j_len++;

		cn->bh = bh;
		cn->blocknr = bh->b_blocknr;
		cn->sb = sb;
		cn->jlist = NULL;
		insert_journal_hash(journal->j_hash_table, cn);
		if (!count_already_incd) {
			get_bh(bh);
		}
	}
	cn->next = NULL;
	cn->prev = journal->j_last;
	cn->bh = bh;
	if (journal->j_last) {
		journal->j_last->next = cn;
		journal->j_last = cn;
	} else {
		journal->j_first = cn;
		journal->j_last = cn;
	}
	reiserfs_schedule_old_flush(sb);
	return 0;
}
int journal_end(struct reiserfs_transaction_handle *th,
		struct super_block *sb, unsigned long nblocks)
{
	if (!current->journal_info && th->t_refcount > 1)
		reiserfs_warning(sb, "REISER-NESTING",
				 "th NULL, refcount %d", th->t_refcount);

	if (!th->t_trans_id) {
		WARN_ON(1);
		return -EIO;
	}

	th->t_refcount--;
	if (th->t_refcount > 0) {
		struct reiserfs_transaction_handle *cur_th =
		    current->journal_info;

		/* we aren't allowed to close a nested transaction on a different
		 ** filesystem from the one in the task struct
		 */
		BUG_ON(cur_th->t_super != th->t_super);

		if (th != cur_th) {
			memcpy(current->journal_info, th, sizeof(*th));
			th->t_trans_id = 0;
		}
		return 0;
	} else {
		return do_journal_end(th, sb, nblocks, 0);
	}
}
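
/*
 * Illustrative sketch (not part of the driver): the canonical pattern the
 * journalling API expects from callers elsewhere in reiserfs.  Every block
 * that will be modified is first prepared, then modified, then logged with
 * journal_mark_dirty() inside an open handle.  The helper name and block
 * count are hypothetical, and error handling is abbreviated.
 */
#if 0	/* example only, never compiled */
static int example_log_one_buffer(struct super_block *sb,
				  struct buffer_head *bh)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);
	if (err)
		return err;

	reiserfs_prepare_for_journal(sb, bh, 1);
	/* ... modify bh->b_data here ... */
	journal_mark_dirty(&th, sb, bh);

	return journal_end(&th, sb, 1);
}
#endif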
/* removes from the current transaction, relsing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *sb,
				   b_blocknr_t blocknr, int already_cleaned)
{
	struct buffer_head *bh;
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (!cn || !cn->bh) {
		return ret;
	}
	bh = cn->bh;
	if (cn->prev) {
		cn->prev->next = cn->next;
	}
	if (cn->next) {
		cn->next->prev = cn->prev;
	}
	if (cn == journal->j_first) {
		journal->j_first = cn->next;
	}
	if (cn == journal->j_last) {
		journal->j_last = cn->prev;
	}
	if (bh)
		remove_journal_hash(sb, journal->j_hash_table, NULL,
				    bh->b_blocknr, 0);
	clear_buffer_journaled(bh);	/* don't log this one */

	if (!already_cleaned) {
		clear_buffer_journal_dirty(bh);
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
		put_bh(bh);
		if (atomic_read(&(bh->b_count)) < 0) {
			reiserfs_warning(sb, "journal-1752",
					 "b_count < 0");
		}
		ret = 1;
	}
	journal->j_len--;
	journal->j_len_alloc--;
	free_cnode(sb, cn);
	return ret;
}
/*
** for any cnode in a journal list, it can only be dirtied if all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;
	struct reiserfs_journal_cnode *cur = cn->hprev;
	int can_dirty = 1;

	/* first test hprev.   These are all newer than cn, so any node here
	 ** with the same block number and dev means this node can't be sent
	 ** to disk right now.
	 */
	while (cur && can_dirty) {
		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
		    cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hprev;
	}
	/* then test hnext.  These are all older than cn.  As long as they
	 ** are committed to the log, it is safe to write cn to disk
	 */
	cur = cn->hnext;
	while (cur && can_dirty) {
		if (cur->jlist && cur->jlist->j_len > 0 &&
		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hnext;
	}
	return can_dirty;
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);
	/* you can sync while nested, very, very bad */
	BUG_ON(th->t_refcount > 1);
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
					     1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}
	return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
}
/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(struct work_struct *work)
{
	struct reiserfs_journal *journal =
	    container_of(work, struct reiserfs_journal, j_work.work);
	struct super_block *sb = journal->j_work_sb;
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	reiserfs_write_lock(sb);
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(sb, jl, 1);
	}
	reiserfs_write_unlock(sb);
}
/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
void reiserfs_flush_old_commits(struct super_block *sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	now = get_seconds();
	/* safety check so we don't flush while we are replaying the log during
	 * mount
	 */
	if (list_empty(&journal->j_journal_list))
		return;

	/* check the current transaction.  If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 &&
	    journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb,
					   SB_BUFFER_WITH_SB(sb));

			/* we're only being called from kreiserfsd, it makes no sense to do
			 ** an async commit so that kreiserfsd can do it later
			 */
			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
		}
	}
}
/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done.  By the time it wakes up, the transaction it was called has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
			     struct super_block *sb, unsigned long nblocks,
			     int flags)
{
	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
		atomic_dec(&(journal->j_wcount));
	}

	/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
	 ** will be dealt with by next transaction that actually writes something, but should be taken
	 ** care of in this trans
	 */
	BUG_ON(journal->j_len == 0);

	/* if wcount > 0, and we are called to with flush or commit_now,
	 ** we wait on j_join_wait.  We will wake up when the last writer has
	 ** finished the transaction, and started it on its way to the disk.
	 ** Then, we flush the commit or journal list, and just return 0
	 ** because the rest of journal end was already done for this transaction.
	 */
	if (atomic_read(&(journal->j_wcount)) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&(journal->j_jlock), 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}
			unlock_journal(sb);

			/* sleep while the current transaction is still j_jlocked */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(sb);
				} else {
					lock_journal(sb);
					if (journal->j_trans_id == trans_id) {
						atomic_set(&(journal->j_jlock),
							   1);
					}
					unlock_journal(sb);
				}
			}
			BUG_ON(journal->j_trans_id == trans_id);

			if (commit_now
			    && journal_list_still_alive(sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(sb, jl, 1);
			}
			return 0;
		}
		unlock_journal(sb);
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		journal->j_bcount++;
		unlock_journal(sb);
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
		reiserfs_panic(sb, "journal-003",
			       "j_start (%ld) is too high",
			       journal->j_start);
	}
	return 1;
}
/*
** Does all the work that makes deleting blocks safe.
** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, b_blocknr_t blocknr)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	struct buffer_head *bh = NULL;
	struct reiserfs_list_bitmap *jb = NULL;
	int cleaned = 0;
	BUG_ON(!th->t_trans_id);

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (cn && cn->bh) {
		bh = cn->bh;
		get_bh(bh);
	}
	/* if it is journal new, we just remove it from this transaction */
	if (bh && buffer_journal_new(bh)) {
		clear_buffer_journal_new(bh);
		clear_prepared_bits(bh);
		reiserfs_clean_and_file_buffer(bh);
		cleaned = remove_from_transaction(sb, blocknr, cleaned);
	} else {
		/* set the bit for this block in the journal bitmap for this transaction */
		jb = journal->j_current_jl->j_list_bitmap;
		if (!jb) {
			reiserfs_panic(sb, "journal-1702",
				       "journal_list_bitmap is NULL");
		}
		set_bit_in_list_bitmap(sb, blocknr, jb);

		/* Note, the entire while loop is not allowed to schedule.  */

		if (bh) {
			clear_prepared_bits(bh);
			reiserfs_clean_and_file_buffer(bh);
		}
		cleaned = remove_from_transaction(sb, blocknr, cleaned);

		/* find all older transactions with this block, make sure they don't try to write it out */
		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
					  blocknr);
		while (cn) {
			if (sb == cn->sb && blocknr == cn->blocknr) {
				set_bit(BLOCK_FREED, &cn->state);
				if (cn->bh) {
					if (!cleaned) {
						/* remove_from_transaction will brelse the buffer if it was
						 ** in the current trans
						 */
						clear_buffer_journal_dirty(cn->
									   bh);
						clear_buffer_dirty(cn->bh);
						clear_buffer_journal_test(cn->
									  bh);
						cleaned = 1;
						put_bh(cn->bh);
						if (atomic_read
						    (&(cn->bh->b_count)) < 0) {
							reiserfs_warning(sb,
								 "journal-2138",
								 "cn->bh->b_count < 0");
						}
					}
					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
						atomic_dec(&
							   (cn->jlist->
							    j_nonzerolen));
					}
					cn->bh = NULL;
				}
			}
			cn = cn->hnext;
		}
	}

	if (bh)
		release_buffer_page(bh);	/* get_hash grabs the buffer */
	return 0;
}
void reiserfs_update_inode_transaction(struct inode *inode)
{
	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
	REISERFS_I(inode)->i_jl = journal->j_current_jl;
	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
			     struct reiserfs_journal_list *jl)
{
	struct reiserfs_transaction_handle th;
	struct super_block *sb = inode->i_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	/* is it from the current transaction, or from an unknown transaction? */
	if (id == journal->j_trans_id) {
		jl = journal->j_current_jl;
		/* try to let other writers come in and grow this transaction */
		let_transaction_grow(sb, id);
		if (journal->j_trans_id != id) {
			goto flush_commit_only;
		}

		ret = journal_begin(&th, sb, 1);
		if (ret)
			return ret;

		/* someone might have ended this transaction while we joined */
		if (journal->j_trans_id != id) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
			ret = journal_end(&th, sb, 1);
			goto flush_commit_only;
		}

		ret = journal_end_sync(&th, sb, 1);
		if (!ret)
			ret = 1;

	} else {
		/* this gets tricky, we have to make sure the journal list in
		 * the inode still exists.  We know the list is still around
		 * if we've got a larger transaction id than the oldest list
		 */
	      flush_commit_only:
		if (journal_list_still_alive(inode->i_sb, id)) {
			/*
			 * we only set ret to 1 when we know for sure
			 * the barrier hasn't been started yet on the commit
			 * block.
			 */
			if (atomic_read(&jl->j_commit_left) > 1)
				ret = 1;
			flush_commit_list(sb, jl, 1);
			if (journal->j_errno)
				ret = journal->j_errno;
		}
	}
	/* otherwise the list is gone, and long since committed */
	return ret;
}
int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned int id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/* for the whole inode, assume unset id means it was
	 * changed in the current transaction.  More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}
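
/*
 * Illustrative sketch (not part of the driver): an fsync-style caller only
 * needs reiserfs_commit_for_inode(); the helpers above remember which
 * transaction last touched the inode and flush just that commit.  The
 * wrapper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_sync_inode_metadata(struct inode *inode)
{
	int ret = reiserfs_commit_for_inode(inode);

	/* a positive return only means a commit was actually written */
	return ret < 0 ? ret : 0;
}
#endif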
void reiserfs_restore_prepared_buffer(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	PROC_INFO_INC(sb, journal.restore_prepared);
	if (!bh) {
		return;
	}
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		reiserfs_write_lock(sb);
		cn = get_journal_hash_dev(sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
		reiserfs_write_unlock(sb);
	}
	clear_buffer_journal_prepared(bh);
}
extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
*/
int reiserfs_prepare_for_journal(struct super_block *sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(sb, journal.prepare);

	if (!trylock_buffer(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}
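
/*
 * Illustrative sketch (not part of the driver): the prepare/restore pair.
 * A caller prepares a buffer before touching it; if it then decides not to
 * log the change after all, it must call reiserfs_restore_prepared_buffer()
 * so a previously dirty buffer can be redirtied.  The helper name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_prepare_then_back_out(struct super_block *sb,
					  struct buffer_head *bh)
{
	reiserfs_prepare_for_journal(sb, bh, 1);
	/* ... decide the change is not needed after all ... */
	reiserfs_restore_prepared_buffer(sb, bh);
}
#endif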
/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush;
	int wait_on_commit;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned int commit_trans_id;
	int trans_half;
	int depth;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);

	/* protect flush_older_commits from doing mistakes if the
	   transaction ID counter gets overflowed.  */
	if (th->t_trans_id == ~0U)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
					     1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}

	lock_journal(sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does not return 1
	 ** it tells us if we should continue with the journal_end, or just return
	 */
	if (!check_journal_end(th, sb, nblocks, flags)) {
		reiserfs_schedule_old_flush(sb);
		wake_queued_writers(sb);
		reiserfs_async_progress_wait(sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}

	/*
	 ** j must wait means we have to flush the log blocks, and the real blocks for
	 ** this transaction
	 */
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for them
	 * and raise the refcount so that it is > 0. */
	current->journal_info = th;
	th->t_refcount++;
	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
						 * the transaction */
	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif

	/* setup description block */
	d_bh =
	    journal_getblk(sb,
			   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			   journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
	c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;

	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
	 **  for each real block, add it to the journal list hash,
	 ** copy into real block index array in the commit or desc block
	 */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(sb);
			if (!jl_cn) {
				reiserfs_panic(sb, "journal-1676",
					       "get_cnode returned NULL");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */

			if (is_block_in_log_or_reserved_area
			    (sb, cn->bh->b_blocknr)) {
				reiserfs_panic(sb, "journal-2332",
					       "Trying to log block %lu, "
					       "which is a log block",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal were marked for not logging */
	BUG_ON(journal->j_len == 0);

	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too.  Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;	/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area.  dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh =
			    journal_getblk(sb,
					   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
					   ((cur_write_start +
					     jindex) %
					    SB_ONDISK_JOURNAL_SIZE(sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction.  don't log this one */
			reiserfs_warning(sb, "journal-2048",
					 "BAD, buffer in journal hash, "
					 "but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(sb, cn);
		cn = next;
		reiserfs_cond_resched(sb);
	}

	/* we are done with both the c_bh and d_bh, but
	 ** c_bh must be written after all other commit blocks,
	 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
	 */

	journal->j_current_jl = alloc_journal_list(sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(sb);

	// make sure reiserfs_add_jh sees the new current_jl before we
	// write out the tails
	smp_mb();

	/* tail conversion targets have to hit the disk before we end the
	 * transaction.  Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed and
	 * clean, if we crash before the later transaction commits, the data block
	 * is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		depth = reiserfs_write_unlock_nested(sb);
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		reiserfs_write_lock_nested(sb, depth);
	}
	BUG_ON(!list_empty(&jl->j_tail_bh_list));
	mutex_unlock(&jl->j_commit_mutex);

	/* honor the flush wishes from the caller, simple commits can
	 ** be done outside the journal lock, they are done below
	 **
	 ** if we don't flush the commit list right now, we put it into
	 ** the work queue so the people waiting on the async progress work
	 ** queue don't wait for this proc to flush journal lists and such.
	 */
	if (flush) {
		flush_commit_list(sb, jl, 1);
		flush_journal_list(sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

	/* if the next transaction has any chance of wrapping, flush
	 ** transactions that might get overwritten.  If any journal lists are very
	 ** old flush them as well.
	 */
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(sb)) {
				/* if we don't cross into the next transaction and we don't
				 * wrap, there is no way we can overlap any later transactions
				 * break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from our start to the end of the
				 * log, and our wrapped portion doesn't overlap anything at
				 * the start of the log.  We can break
				 */
				break;
			}
		}
	}

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(sb, "journal-1996",
			       "could not get a list bitmap");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(sb);
	/* wake up any body waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(sb, commit_trans_id)) {
		flush_commit_list(sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;

	return journal->j_errno;
}
/* Send the file system read only and refuse new transactions */
void reiserfs_abort_journal(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
}