// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);

	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}
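
/*
 * Note on maybe_release_space() above: the clone bitmap (bi_clone) preserves
 * the pre-transaction allocation state so that blocks freed in the current
 * transaction are not handed out again too early. Once the rgrp bitmap
 * buffer has been logged and is being unpinned, the clone can be resynced
 * from the real bitmap and rd_free_clone/rd_extfail_pt reset, making those
 * blocks allocatable again.
 */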
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}
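
/*
 * gfs2_log_incr_head() above treats the journal as a circular buffer:
 * sd_log_flush_head advances by one block per log write and wraps back to 0
 * once it reaches jd_blocks, the size of the journal in filesystem blocks.
 */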
u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}
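
/*
 * The mapping in gfs2_log_bmap() is linear within each journal extent. For
 * example, an extent with lblock = 8, blocks = 16 and dblock = 0x2000 maps
 * journal block 11 to device block 0x2000 + 11 - 8 = 0x2003.
 */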
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}
/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */
static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}
/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}
/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}
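
/*
 * In other words, as long as successive log writes land on consecutive
 * device blocks, gfs2_log_get_bio() keeps handing back the same cached bio;
 * only a non-sequential block (or an explicit flush) causes the pending bio
 * to be submitted and a fresh one to be allocated.
 */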
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
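
/*
 * gfs2_log_write() is the single funnel for log i/o: gfs2_log_write_bh()
 * below uses it for pinned metadata buffers and gfs2_log_write_page() for
 * whole log-descriptor pages, both after mapping the current flush head to a
 * device block with gfs2_log_bmap().
 */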
/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
}
/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
}
/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */
static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: 1 if found, 0 otherwise.
 */
static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host uninitialized_var(lh);
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}
/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (cleanup == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}
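
/*
 * gfs2_chain_bio() starts the new bio at the sector where the previous one
 * ends and chains the two, so the previous bio's completion handler
 * (gfs2_end_log_read()) is not called until the chained bio has completed as
 * well; this matters because a page that straddles the two bios must not be
 * unlocked until both reads into it are done. The previous bio is submitted
 * here and the caller continues filling the returned one.
 */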
/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, inode pages will not be truncated
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}
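
/*
 * The read-ahead window in gfs2_find_jhead() is bounded by max_blocks, i.e.
 * roughly 2MB worth of filesystem blocks: submission may run ahead of
 * processing by at most that much, and pages are only scanned for log
 * headers once the window is full, so at least one bio stays in flight while
 * earlier pages are examined.
 */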
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}
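
/*
 * Escaping: a journaled data block whose first four bytes happen to equal
 * GFS2_MAGIC could be confused with GFS2 metadata when the journal is
 * scanned. Such blocks are marked "escaped" here; gfs2_before_commit() then
 * zeroes the magic in the copy written to the log, and replay (see the esc
 * handling in databuf_lo_scan_elements()) restores it.
 */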
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}
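
/*
 * blocknr_cmp() is the comparison callback handed to list_sort() in
 * gfs2_before_commit(): sorting the buffer list by in-place block number
 * keeps the block numbers recorded in each log descriptor in ascending
 * order, which tends to make later in-place writeback more sequential.
 */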
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
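
/*
 * gfs2_before_commit() writes each batch of up to 'limit' buffers in two
 * passes: the first builds and writes a log descriptor page listing the
 * in-place block numbers (plus an escape flag per block for journaled data),
 * the second writes the buffer contents themselves, substituting a cleaned
 * copy for escaped blocks.
 */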
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}
/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */
static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
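
/*
 * Revokes are packed as tightly as possible: the first log block holds
 * (block size - sizeof(struct gfs2_log_descriptor)) / 8 revoked block
 * numbers, and each continuation block (GFS2_METATYPE_LB) holds
 * (block size - sizeof(struct gfs2_meta_header)) / 8 more; gfs2_struct2blk()
 * works out how many log blocks that takes in total.
 */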
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}
1057 static void databuf_lo_after_scan(struct gfs2_jdesc
*jd
, int error
, int pass
)
1059 struct gfs2_inode
*ip
= GFS2_I(jd
->jd_inode
);
1060 struct gfs2_sbd
*sdp
= GFS2_SB(jd
->jd_inode
);
1063 gfs2_meta_sync(ip
->i_gl
);
1070 gfs2_meta_sync(ip
->i_gl
);
1072 fs_info(sdp
, "jid=%u: Replayed %u of %u data blocks\n",
1073 jd
->jd_jid
, jd
->jd_replayed_blocks
, jd
->jd_found_blocks
);
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}
static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};