// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
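
/*
 * Write back all dirty, mapped buffers of a metadata folio.  Buffers that
 * cannot be locked during a non-blocking (WB_SYNC_NONE) pass are simply
 * redirtied; the rest are marked for async write and submitted below.
 */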
static void gfs2_aspace_write_folio(struct folio *folio,
                struct writeback_control *wbc)
{
        struct buffer_head *bh, *head;
        int nr_underway = 0;
        blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

        BUG_ON(!folio_test_locked(folio));

        head = folio_buffers(folio);
        bh = head;

        do {
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page. Note that this can
                 * potentially cause a busy-wait loop from flusher thread and kswapd
                 * activity, but those code paths have their own higher-level
                 * throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        folio_redirty_for_writepage(wbc, folio);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write(bh);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The folio and its buffers are protected from truncation by
         * the writeback flag, so we can drop the bh refcounts early.
         */
        BUG_ON(folio_test_writeback(folio));
        folio_start_writeback(folio);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(REQ_OP_WRITE | write_flags, bh);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        folio_unlock(folio);

        if (nr_underway == 0)
                folio_end_writeback(folio);
}
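
/*
 * Walk the dirty folios of a metadata address space with writeback_iter()
 * and write each one out via gfs2_aspace_write_folio().
 */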
static int gfs2_aspace_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct folio *folio = NULL;
        int error;

        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                gfs2_aspace_write_folio(folio, wbc);

        return error;
}
const struct address_space_operations gfs2_meta_aops = {
        .dirty_folio      = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .writepages       = gfs2_aspace_writepages,
        .release_folio    = gfs2_release_folio,
};
const struct address_space_operations gfs2_rgrp_aops = {
        .dirty_folio      = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .writepages       = gfs2_aspace_writepages,
        .release_folio    = gfs2_release_folio,
};
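
/*
 * Illustrative example of the block-to-page arithmetic used by gfs2_getbuf()
 * and gfs2_getjdatabuf() below: with 4 KiB pages (PAGE_SHIFT = 12) and
 * 1 KiB filesystem blocks (sb_bsize_shift = 10), shift = 2, so each page
 * holds four blocks; block 11 then maps to page index 2 (11 >> 2) and
 * buffer slot 3 (11 - (2 << 2)) within that page.
 */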
/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
        struct address_space *mapping = gfs2_glock2aspace(gl);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct folio *folio;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        if (mapping == NULL)
                mapping = &sdp->sd_aspace;

        shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */

        if (create) {
                folio = __filemap_get_folio(mapping, index,
                                FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                mapping_gfp_mask(mapping) | __GFP_NOFAIL);
                bh = folio_buffers(folio);
                if (!bh)
                        bh = create_empty_buffers(folio,
                                        sdp->sd_sb.sb_bsize, 0);
        } else {
                folio = __filemap_get_folio(mapping, index,
                                FGP_LOCK | FGP_ACCESSED, 0);
                if (IS_ERR(folio))
                        return NULL;
                bh = folio_buffers(folio);
                if (!bh) {
                        folio_unlock(folio);
                        folio_put(folio);
                        return NULL;
                }
        }

        bh = get_nth_bh(bh, bufnum);
        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

        folio_unlock(folio);
        folio_put(folio);

        return bh;
}
static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}
/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = gfs2_getbuf(gl, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}
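
/*
 * Completion handler for metadata read bios: for each segment, walk the
 * buffer heads it covers and hand the I/O result to their b_end_io callback.
 */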
static void gfs2_meta_read_endio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                struct buffer_head *bh = page_buffers(page);
                unsigned int len = bvec->bv_len;

                while (bh_offset(bh) < bvec->bv_offset)
                        bh = bh->b_this_page;
                do {
                        struct buffer_head *next = bh->b_this_page;
                        len -= bh->b_size;
                        bh->b_end_io(bh, !bio->bi_status);
                        bh = next;
                } while (bh && len);
        }
        bio_put(bio);
}
/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
{
        while (num > 0) {
                struct buffer_head *bh = *bhs;
                struct bio *bio;

                bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
                bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
                while (num > 0) {
                        bh = *bhs;
                        if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
                                BUG_ON(bio->bi_iter.bi_size == 0);
                                break;
                        }
                        bhs++;
                        num--;
                }
                bio->bi_end_io = gfs2_meta_read_endio;
                submit_bio(bio);
        }
}
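
/*
 * If bio_add_page() fails because a bio is full, the inner loop above breaks
 * out and the outer loop allocates a fresh bio for the remaining buffers, so
 * a single call still covers all @num buffer heads.
 */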
/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   int rahead, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct buffer_head *bh, *bhs[2];
        int num = 0;

        if (gfs2_withdrawing_or_withdrawn(sdp) &&
            !gfs2_withdraw_in_prog(sdp)) {
                *bhp = NULL;
                return -EIO;
        }

        *bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                flags &= ~DIO_WAIT;
        } else {
                bh->b_end_io = end_buffer_read_sync;
                get_bh(bh);
                bhs[num++] = bh;
        }

        if (rahead) {
                bh = gfs2_getbuf(gl, blkno + 1, CREATE);

                lock_buffer(bh);
                if (buffer_uptodate(bh)) {
                        unlock_buffer(bh);
                        brelse(bh);
                } else {
                        bh->b_end_io = end_buffer_read_sync;
                        bhs[num++] = bh;
                }
        }

        gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
        if (!(flags & DIO_WAIT))
                return 0;

        bh = *bhp;
        wait_on_buffer(bh);
        if (unlikely(!buffer_uptodate(bh))) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
                        gfs2_io_error_bh_wd(sdp, bh);
                brelse(bh);
                *bhp = NULL;
                return -EIO;
        }

        return 0;
}
/**
 * gfs2_meta_wait - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (gfs2_withdrawing_or_withdrawn(sdp) &&
            !gfs2_withdraw_in_prog(sdp))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
                        gfs2_io_error_bh_wd(sdp, bh);
                return -EIO;
        }
        if (gfs2_withdrawing_or_withdrawn(sdp) &&
            !gfs2_withdraw_in_prog(sdp))
                return -EIO;

        return 0;
}
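
/*
 * Detach a buffer from the journaling machinery: unpin it if it was pinned,
 * update the current transaction's removal counts, and either queue a revoke,
 * free the bufdata, or pull it off the AIL lists as appropriate.  The buffer
 * is left neither dirty nor uptodate so it will be re-read if used again.
 */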
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
        struct address_space *mapping = bh->b_folio->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_trans *tr = current->journal_info;
        int was_pinned = 0;

        if (test_clear_buffer_pinned(bh)) {
                trace_gfs2_pin(bd, 0);
                atomic_dec(&sdp->sd_log_pinned);
                list_del_init(&bd->bd_list);
                if (meta == REMOVE_META)
                        tr->tr_num_buf_rm++;
                else
                        tr->tr_num_databuf_rm++;
                set_bit(TR_TOUCHED, &tr->tr_flags);
                was_pinned = 1;
                brelse(bh);
        }
        if (bd) {
                if (bd->bd_tr) {
                        gfs2_trans_add_revoke(sdp, bd);
                } else if (was_pinned) {
                        bh->b_private = NULL;
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
                } else if (!list_empty(&bd->bd_ail_st_list) &&
                           !list_empty(&bd->bd_ail_gl_list)) {
                        gfs2_remove_from_ail(bd);
                }
        }
        clear_buffer_dirty(bh);
        clear_buffer_uptodate(bh);
}
/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: the filesystem
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers, corresponding to deleted blocks, from the journal. If we find any
 * bufdata elements on the system ail1 list, they haven't been written to
 * the journal yet. So we remove them.
 */
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
        struct gfs2_trans *tr, *s;
        struct gfs2_bufdata *bd, *bs;
        struct buffer_head *bh;
        u64 end = bstart + blen;

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
                                continue;

                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}
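
/*
 * Look up, without creating, the buffer head for a journaled-data block in
 * the inode's own page cache (rather than the glock's metadata mapping).
 * Returns NULL if the folio or its buffers are not present.
 */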
static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct folio *folio;
        struct buffer_head *bh;
        unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        unsigned long index = blkno >> shift; /* convert block to page */
        unsigned int bufnum = blkno - (index << shift);

        folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
        if (IS_ERR(folio))
                return NULL;
        bh = folio_buffers(folio);
        if (bh)
                bh = get_nth_bh(bh, bufnum);
        folio_unlock(folio);
        folio_put(folio);
        return bh;
}
/**
 * gfs2_journal_wipe - ensure an inode's buffers are no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 */

void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh;
        int ty;

        if (!ip->i_gl) {
                /* This can only happen during incomplete inode creation. */
                BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
                return;
        }

        gfs2_ail1_wipe(sdp, bstart, blen);
        while (blen) {
                ty = REMOVE_META;
                bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
                if (!bh && gfs2_is_jdata(ip)) {
                        bh = gfs2_getjdatabuf(ip, bstart);
                        ty = REMOVE_JDATA;
                }
                if (bh) {
                        lock_buffer(bh);
                        gfs2_log_lock(sdp);
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, ty);
                        spin_unlock(&sdp->sd_ail_lock);
                        gfs2_log_unlock(sdp);
                        unlock_buffer(bh);
                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}
/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
                     struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh;
        int ret = 0;
        int rahead = 0;

        if (num == ip->i_no_addr)
                rahead = ip->i_rahead;

        ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
        if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
                brelse(bh);
                ret = -EIO;
        } else {
                *bhp = bh;
        }
        return ret;
}
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                     sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = gfs2_getbuf(gl, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        bh_read_nowait(first_bh, REQ_META | REQ_PRIO);

        dblock++;
        extlen--;

        while (extlen) {
                bh = gfs2_getbuf(gl, dblock, CREATE);

                bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
                brelse(bh);
                dblock++;
                extlen--;
                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}