/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
/**
 * gfs2_struct2blk - compute the number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}
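/*
 * Worked example (illustrative figures only, not taken from the code above):
 * with a 4096-byte block size and 8-byte revoke entries (ssize == sizeof(u64)),
 * the first block holds (4096 - sizeof(struct gfs2_log_descriptor)) / 8
 * entries and each continuation block holds
 * (4096 - sizeof(struct gfs2_meta_header)) / 8, so nstruct == first + 1
 * already needs two blocks, which is what the DIV_ROUND_UP() above computes.
 */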
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_ail = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @ai: The ail structure
 *
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_ail *ai)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_ail == ai);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh))
                                gfs2_io_error_bh(sdp, bh);
                        list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_ail *ai;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(ai, head, ai_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, ai))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}
/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_ail == ai);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
        }
}
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai, *s;
        int ret;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
                gfs2_ail1_empty_one(sdp, ai);
                if (list_empty(&ai->ai_ail1_list))
                        list_move(&ai->ai_list, &sdp->sd_ail2_list);
                else
                        break;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        return ret;
}
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(ai, &sdp->sd_ail1_list, ai_list) {
                list_for_each_entry(bd, &ai->ai_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}
/**
 * gfs2_ail2_empty_one - Remove all remaining entries from an AIL2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &ai->ai_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_ail == ai);
                gfs2_remove_from_ail(bd);
        }
}
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_ail *ai, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

        list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
                a = (old_tail <= ai->ai_first);
                b = (ai->ai_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, ai);
                list_del(&ai->ai_list);
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
                kfree(ai);
        }

        spin_unlock(&sdp->sd_ail_lock);
}
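/*
 * Illustration of the removal test above (hypothetical tail values): with
 * old_tail == 100 and new_tail == 200 (no wrap), an AIL entry is freed only
 * when 100 <= ai_first && ai_first < 200.  If the tail has wrapped, e.g.
 * old_tail == 7000 and new_tail == 50 on an 8192-block journal, either
 * condition on its own suffices, hence (a || b) in the wrapped case.
 */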
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                                  TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while(free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                           free_blocks - blks) != free_blocks)
                goto retry;
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);

        return 0;
}
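/*
 * Example of the headroom reserved above: with 4096-byte filesystem blocks,
 * reserved_blks is 6 * (4096 / 4096) = 6; with 1024-byte blocks it becomes
 * 6 * (4096 / 1024) = 24, so roughly the same number of bytes of journal is
 * always kept back for the header blocks written at flush time.
 */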
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
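/*
 * Example (assuming a journal of 8192 blocks): log_distance(sdp, 10, 8000)
 * gives 10 - 8000 = -7990, which wraps to -7990 + 8192 = 202 blocks, i.e.
 * the newer block sits 202 journal blocks past the older one.
 */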
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */

static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf_limit, metabufhdrs_needed;
        unsigned int dbuf_limit, databufhdrs_needed;
        unsigned int revokes = 0;

        mbuf_limit = buf_limit(sdp);
        metabufhdrs_needed = (sdp->sd_log_commited_buf +
                              (mbuf_limit - 1)) / mbuf_limit;
        dbuf_limit = databuf_limit(sdp);
        databufhdrs_needed = (sdp->sd_log_commited_databuf +
                              (dbuf_limit - 1)) / dbuf_limit;

        if (sdp->sd_log_commited_revoke > 0)
                revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));

        reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
                   sdp->sd_log_commited_databuf + databufhdrs_needed +
                   revokes;
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}
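/*
 * Example of the header arithmetic above (using the buf_limit/databuf_limit
 * figures quoted in the comment): committing 1000 metadata buffers with
 * buf_limit == 502 needs (1000 + 501) / 502 == 2 log headers, and 300
 * journaled data buffers with databuf_limit == 251 need
 * (300 + 250) / 251 == 2 headers, plus any revoke blocks and the final
 * "+1" overall header.
 */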
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
                tail = ai->ai_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}
static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while(atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}
static int bd_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_bufdata *bda, *bdb;

        bda = list_entry(a, struct gfs2_bufdata, bd_list);
        bdb = list_entry(b, struct gfs2_bufdata, bd_list);

        if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
                return -1;
        if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
                return 1;
        return 0;
}
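/*
 * bd_cmp() is the comparison callback handed to list_sort() below: sorting
 * the ordered-write list by b_blocknr means the subsequent submit_bh()
 * calls are issued in ascending disk order, keeping the I/O largely
 * sequential.
 */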
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        LIST_HEAD(written);

        gfs2_log_lock(sdp);
        list_sort(NULL, &sdp->sd_log_le_ordered, &bd_cmp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_list);
                list_move(&bd->bd_list, &written);
                bh = bd->bd_bh;
                if (!buffer_dirty(bh))
                        continue;
                get_bh(bh);
                gfs2_log_unlock(sdp);
                lock_buffer(bh);
                if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
                        bh->b_end_io = end_buffer_write_sync;
                        submit_bh(WRITE_SYNC, bh);
                } else {
                        unlock_buffer(bh);
                        brelse(bh);
                }
                gfs2_log_lock(sdp);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        gfs2_log_unlock(sdp);
}
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        gfs2_log_lock(sdp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_list);
                bh = bd->bd_bh;
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh);
                        brelse(bh);
                        gfs2_log_lock(sdp);
                        continue;
                }
                list_del_init(&bd->bd_list);
        }
        gfs2_log_unlock(sdp);
}
/**
 * log_write_header - Initialize and write a journal log header
 * @sdp: The GFS2 superblock
 * @flags: The log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT on shutdown)
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
        int rw = WRITE_FLUSH_FUA | REQ_META;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        lh = page_address(page);
        clear_page(lh);

        gfs2_ail1_empty(sdp);
        tail = current_tail(sdp);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                rw = WRITE_SYNC | REQ_META | REQ_PRIO;
        }

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_log_write_page(sdp, page);
        gfs2_log_flush_bio(sdp, rw);
        log_flush_wait(sdp);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_ail *ai;

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1);

        ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
        INIT_LIST_HEAD(&ai->ai_ail2_list);

        if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
                printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
                       sdp->sd_log_commited_buf);
                gfs2_assert_withdraw(sdp, 0);
        }
        if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
                printk(KERN_INFO "GFS2: log databuf %u %u\n",
                       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
                gfs2_assert_withdraw(sdp, 0);
        }
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        ai->ai_first = sdp->sd_log_flush_head;

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp);
        gfs2_log_flush_bio(sdp, WRITE);

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_write_header(sdp, 0);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, 0);
        }
        lops_after_commit(sdp, ai);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_buf = 0;
        sdp->sd_log_commited_databuf = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        trace_gfs2_log_flush(sdp, 0);
        up_write(&sdp->sd_log_flush_lock);

        kfree(ai);
}
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
                tr->tr_num_databuf_rm;
        gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
                             (((int)sdp->sd_log_commited_databuf) >= 0));
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
        unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}
/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);
        up_read(&sdp->sd_log_flush_lock);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}
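/*
 * Example of the wake-up test above (hypothetical journal size): with a
 * 32768-block journal, thresh1 would be about 10922 blocks and thresh2
 * about 21845, so logd is woken once more than roughly 1/3 of the journal
 * is pinned or more than roughly 2/3 of it is in use overall.
 */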
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        down_write(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;

        up_write(&sdp->sd_log_flush_lock);
}
/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
        gfs2_log_flush(sdp, NULL);
        for (;;) {
                gfs2_ail1_start(sdp);
                gfs2_ail1_wait(sdp);
                if (gfs2_ail1_empty(sdp))
                        break;
        }
        gfs2_log_flush(sdp, NULL);
}
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
        return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);

        while (!kthread_should_stop()) {

                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL);
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL);
                }

                if (!gfs2_ail_flush_reqd(sdp))
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while(t && !gfs2_ail_flush_reqd(sdp) &&
                        !gfs2_jrnl_flush_reqd(sdp) &&
                        !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}