fs/gfs2/log.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

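/*
 * Worked example (illustrative only; the real values of sd_ldptrs and
 * sd_inptrs depend on the block size, roughly 503 and 509 u64 pointers
 * per block for 4KiB blocks):
 *
 *	nstruct = 1000, sd_ldptrs = 503, sd_inptrs = 509
 *	blks = 1 + DIV_ROUND_UP(1000 - 503, 509) = 1 + 1 = 2
 *
 * i.e. one log descriptor block plus one continuation block.
 */
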
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 */

static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose AIL1 list to start I/O on
 *
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr,
			       bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh) &&
			    !test_and_set_bit(SDF_AIL1_IO_ERROR,
					      &sdp->sd_flags)) {
				gfs2_io_error_bh(sdp, bh);
				*withdraw = true;
			}
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		return 1;
	}

	return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	bool withdraw = false;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw) &&
		    !gfs2_withdrawn(sdp))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (withdraw)
		gfs2_lm_withdraw(sdp, NULL);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction (AIL entry) to check
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				bool *withdraw)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh) &&
		    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
			gfs2_io_error_bh(sdp, bh);
			*withdraw = true;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;
	bool withdraw = false;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &withdraw);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (withdraw)
		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction (AIL entry) to empty
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}

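/*
 * Example of the wrap-around test above (numbers are illustrative): on a
 * 100-block journal with old_tail = 90 and new_tail = 10, wrap is true, so
 * a transaction with tr_first = 95 (a set) or tr_first = 5 (b set) is
 * removed, while one with tr_first = 50 (neither set) is kept. Without
 * wrap, both conditions must hold, i.e. old_tail <= tr_first < new_tail.
 */
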
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}

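/*
 * Reservation example (illustrative): with 4096-byte filesystem blocks,
 * reserved_blks = 7 * (4096 / 4096) = 7, so a caller asking for blks = 10
 * has wanted = 17 and sleeps on sd_log_waitq until sd_log_blks_free
 * exceeds 17; with 1024-byte blocks the same safety margin grows to
 * 7 * 4 = 28 blocks.
 */
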
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

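/*
 * Example (illustrative journal size): with jd_blocks = 32768, the
 * distance from older = 32760 to newer = 10 is 10 - 32760 = -32750,
 * which wraps to -32750 + 32768 = 18 blocks in the journal direction.
 */
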
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */

static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_committed_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

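/*
 * Worked example using the 4KiB limits quoted above (buf_limit = 502,
 * databuf_limit = 251; the transaction counts are illustrative): with
 * mbuf = 600 and dbuf = 300,
 *
 *	reserved = 600 + 300
 *		 + DIV_ROUND_UP(600, 502)	(= 2 metadata headers)
 *		 + DIV_ROUND_UP(300, 251)	(= 2 jdata headers)
 *		 = 904
 *
 * plus gfs2_struct2blk() blocks for any committed revokes and one block
 * for the overall log header.
 */
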
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}

void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}

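/*
 * Sizing sketch (illustrative, assuming 4KiB blocks): the initial
 * max_revokes is the number of u64 block numbers that fit after the log
 * descriptor in one block (roughly 503); each pass of the while loop above
 * adds a continuation block's worth (roughly 509, since only a meta header
 * is repeated). Subtracting sd_log_num_revoke leaves the slots that the
 * second loop may fill with revokes harvested from the AIL.
 */
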
/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 *
 * Returns: the initialized log buffer descriptor
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawn(sdp))
		goto out;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
out:
	log_flush_wait(sdp);
}

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 *
 * Returns: the initialized log buffer descriptor
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);
	gfs2_log_incr_head(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely (state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke);

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_committed_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp))
					break;
			}
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_num_revoke += new->tr_num_revoke;
	old->tr_num_revoke_rm += new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 *
 * Returns: errno
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

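/*
 * Threshold example (illustrative journal size): for a 32768-block journal,
 * thresh1 is roughly 32768 / 3 = 10922 and thresh2 roughly 2 * 32768 / 3 =
 * 21845, so logd is woken once more than ~10922 blocks are pinned or more
 * than ~21845 journal blocks are in use.
 */
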
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm_withdraw(sdp,
					 "GFS2: fsid=%s: error %d: "
					 "withdrawing the file system to "
					 "prevent further damage.\n",
					 sdp->sd_fsname, sdp->sd_log_error);
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}