/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "trace_gfs2.h"
/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
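/*
 * Editor's illustration (not from the original source): for revoke entries
 * ssize == sizeof(u64), so with a 4KiB block the first (descriptor) block
 * holds first = (4096 - sizeof(struct gfs2_log_descriptor)) / 8 entries and
 * each continuation block holds second = (4096 - sizeof(struct gfs2_meta_header)) / 8.
 * nstruct <= first therefore costs a single block, and anything beyond that
 * adds DIV_ROUND_UP(nstruct - first, second) more.
 */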
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 */
void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list to start I/O on
 */
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;

		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		return 1;
	}

	return 0;
}
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		if (gfs2_ail1_start_one(sdp, wbc, tr))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
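/*
 * Editor's note: gfs2_ail1_start_one() drops and retakes sd_ail_lock around
 * generic_writepages(), so a non-zero return means the ail1 list may have
 * changed underneath us and the scan restarts from the newest transaction.
 * The block plug batches the writeback I/O issued while the lock is dropped.
 */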
/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to check
 */
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	return ret;
}
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
/**
 * gfs2_ail2_empty_one - Remove all the entries from a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the transaction to empty
 */
static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
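/*
 * Editor's note on the removal test above: tr_first is the journal block at
 * which a transaction's log entries begin.  Without a wrap (new_tail >=
 * old_tail) a transaction is only reclaimable if it lies entirely within
 * [old_tail, new_tail), hence (a && b).  If the new tail has wrapped past
 * the end of the journal, the reclaimable region is split in two, so either
 * condition alone suffices, hence (a || b).
 */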
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks)
		goto retry;
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}
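/*
 * Editor's illustration (assuming a 4096-byte filesystem block):
 * reserved_blks evaluates to 7 * (4096 / 4096) = 7, so a caller asking for
 * blks blocks only proceeds once at least blks + 7 blocks are free; with
 * 1KiB blocks the margin grows to 7 * 4 = 28.  That margin is what keeps
 * the per-flush header blocks described above from running the journal dry.
 */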
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
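/*
 * Editor's example: in a 1000-block journal, log_distance(sdp, 10, 990)
 * gives 10 - 990 = -980, which wraps to -980 + 1000 = 20, i.e. the forward
 * distance of 20 blocks in the journal's direction of travel.
 */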
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
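/*
 * Editor's illustration (numbers assume the 4KiB block size implied by the
 * 502/251 limits quoted above): a transaction with 600 dirty metadata
 * buffers and 100 journaled data buffers reserves 600 + 100 = 700 blocks
 * for the buffers themselves, plus DIV_ROUND_UP(600, 502) = 2 metadata
 * headers, DIV_ROUND_UP(100, 251) = 1 data header, any revoke descriptor
 * blocks, and one block for the overall log header.
 */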
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}
static void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}
static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}
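/*
 * Editor's note: sorting the ordered-write list by i_no_addr (the inode's
 * disk address) in gfs2_ordered_write() below should make the
 * filemap_fdatawrite() calls walk inodes in roughly ascending disk order,
 * keeping the resulting writeback closer to sequential.
 */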
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_move(&ip->i_ordered, &written);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_le_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	bd->bd_ops = &gfs2_revoke_lops;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
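/*
 * Editor's note: max_revokes above starts as the number of u64 revoke
 * entries that fit behind a log descriptor in a single block; each block
 * already needed for the queued revokes (the while loop) adds the slightly
 * larger per-continuation-block capacity.  Whatever capacity remains after
 * subtracting sd_log_num_revoke is then filled from the ail2 lists without
 * growing the existing reservation.
 */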
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: log header flags, e.g. GFS2_LOG_HEAD_UNMOUNT, or zero for a normal flush
 */
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;
	int rw = WRITE_FLUSH_FUA | REQ_META;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	tail = current_tail(sdp);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		rw = WRITE_SYNC | REQ_META | REQ_PRIO;
	}

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, rw);
	log_flush_wait(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
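/*
 * Editor's note: with barriers enabled the header goes out as
 * WRITE_FLUSH_FUA, so the device flushes the previously written log blocks
 * and commits the header durably in a single request.  With SDF_NOBARRIERS
 * set, the code instead waits for ordered data and in-flight log I/O
 * explicitly and then issues a plain synchronous write.
 */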
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 */
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_trans *tr;

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
	}

	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_flush_bio(sdp, WRITE);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, 0);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, 0);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	trace_gfs2_log_flush(sdp, 0);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}
/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */
static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(old->tr_attached != 1);

	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_num_revoke	+= new->tr_num_revoke;
	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, tr->tr_t_gh.gh_gl);
		sdp->sd_log_tr = tr;
		tr->tr_attached = 1;
	}

	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 *
 * Returns: errno
 */
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}
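/*
 * Editor's example (assuming a 32768-block journal): thresh1 defaults to
 * one third of the journal (about 10922 blocks) and thresh2 to two thirds
 * (about 21845), so logd is woken either when pinned blocks alone exceed
 * ~10922 or when pinned plus AIL blocks together exceed ~21845.
 */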
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */
void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}
/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 */
void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		if (gfs2_ail1_empty(sdp))
			break;
	}
	gfs2_log_flush(sdp, NULL);
}
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}
static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */
int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {

		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);
		}

		if (!gfs2_ail_flush_reqd(sdp))
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}