fs/gfs2/lops.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>
#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
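/*
 * Pin/unpin lifecycle: a pinned buffer is clean from the VM's point of view
 * but is kept in memory, via the extra reference taken above, until its copy
 * has been written to the journal.  gfs2_unpin() below then re-dirties the
 * buffer and queues it on the transaction's AIL1 list, from where normal
 * writeback sends it to its in-place location.
 */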
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}
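/*
 * Why the clone bitmap matters here: blocks freed in a transaction must not
 * be reallocated before that transaction is safely in the journal, so
 * allocations are decided against bi_clone while bi_bh holds the real
 * bitmap.  By the time an rgrp buffer is unpinned its journal copy is on
 * disk, so the clone can be resynced with the real bitmap and, with
 * -o discard, the newly freed ranges passed down to the device.
 */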
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}
u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}
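/*
 * Example of the extent mapping above: given an extent with lblock = 16,
 * blocks = 8 and dblock = 5000, logical journal block 21 falls inside it and
 * maps to physical block 5000 + 21 - 16 = 5005.  The extent list is built by
 * gfs2_map_journal_extents(), so a return of -1 means the logical block lies
 * outside the journal.
 */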
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}
/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */
static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}
/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}
/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, enum req_op op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}
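/*
 * The sequential-append check above works in filesystem blocks:
 * bio_end_sector() returns the first 512-byte sector past the end of the
 * bio, and shifting right by sd_fsb2bb_shift converts that back to a
 * filesystem block number.  With 4KiB blocks, sd_fsb2bb_shift is 3, so a
 * bio ending at sector 8016 continues at block 1002.
 */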
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
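/*
 * bio_add_page() returns the number of bytes added, so a return of 0 means
 * the current bio is full.  The retry with flush = true submits the full bio
 * and allocates an empty one, to which adding a single page segment cannot
 * fail; the WARN_ON documents that invariant.
 */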
/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
		       bh_offset(bh), dblock);
}
/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */
static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}
/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */
static void gfs2_end_log_read(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		/* We're abusing wb_err to get the error to gfs2_find_jhead */
		filemap_set_wb_err(fi.folio->mapping, error);
		folio_end_read(fi.folio, !error);
	}

	bio_put(bio);
}
/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: 1 if found, 0 otherwise.
 */
static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr;
	unsigned int offset;
	bool ret = false;

	kaddr = kmap_local_page(page);
	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_local(kaddr);
	return ret;
}
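/*
 * The search relies on log header sequence numbers increasing monotonically
 * around the circular journal.  Scanning forward from the last known head,
 * every valid header with a sequence >= the best seen so far advances the
 * candidate head; the first header with a *lower* sequence marks where the
 * log wraps back to older entries, so the previous candidate is the true
 * head and the search can stop.
 */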
/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the folio with 'index' in the journal's mapping. Search the folio for
 * the journal head if requested (cleanup == false). Release refs on the
 * folio so the page cache can reclaim it. We grabbed a
 * reference on this folio twice, first when we did a grab_cache_page()
 * to obtain the folio to add it to the bio and second when we do a
 * filemap_get_folio() here to get the folio to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a folio we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the folio.
 */
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct folio *folio;

	folio = filemap_get_folio(jd->jd_inode->i_mapping, index);

	folio_wait_locked(folio);
	if (!folio_test_uptodate(folio))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, &folio->page);

	/* filemap_get_folio() and the earlier grab_cache_page() */
	folio_put_refs(folio, 2);
}
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}
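/*
 * bio_chain(new, prev) ties the two completions together: prev's end_io is
 * not called until new has completed as well.  gfs2_find_jhead() below
 * chains only when a page straddles two bios (the new bio is sized for the
 * remainder of the current page), so the shared page cannot be unlocked by
 * prev's gfs2_end_log_read() before every block of it has been read.
 */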
/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set inode pages will not be truncated
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = grab_cache_page(mapping, block >> shift);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}
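/*
 * Reading the journal through the pagecache like this keeps up to roughly
 * max_blocks (2MiB worth of filesystem blocks) of read I/O in flight ahead
 * of the search: pages are submitted in order and waited on and searched in
 * the same order.  Once the head is found, the out: path only drains the
 * pages already submitted and drops the page references.
 */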
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_local_page(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_local(kaddr);
}
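/*
 * Escaping: a journaled data block that happens to begin with GFS2_MAGIC
 * could be mistaken for metadata when the journal is scanned during replay.
 * Such blocks are flagged "escaped" here; gfs2_before_commit() then writes
 * the log copy with its first word zeroed and records the fact in the
 * descriptor, so that databuf_lo_scan_elements() can restore the magic
 * number on replay.
 */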
static int blocknr_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *p;

				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				p = page_address(page);
				/* Copy from the in-place buffer, not from the
				   page we just allocated: */
				memcpy_from_page(p, bd2->bd_bh->b_page,
						 bh_offset(bd2->bd_bh), bd2->bd_bh->b_size);
				*(__be32 *)p = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
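/*
 * The commit path above makes two passes over each chunk of at most 'limit'
 * buffers: the first writes a descriptor page listing the in-place block
 * number of every buffer (plus an escape flag for journaled data), the
 * second writes the buffer contents themselves.  Replay uses the descriptor
 * to know where each of the following log blocks belongs on disk.
 */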
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}
#define obsolete_rgrp_replay \
"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
#define obsolete_rgrp_replay2 \
"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"
static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
			  u64 blkno)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;

	rgd = gfs2_blk2rgrpd(sdp, blkno, false);
	if (rgd && rgd->rd_addr == blkno &&
	    rgd->rd_bits && rgd->rd_bits->bi_bh) {
		fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
			jd->jd_jid, bh_log->b_blocknr);
		fs_info(sdp, obsolete_rgrp_replay2,
			buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
			buffer_pinned(rgd->rd_bits->bi_bh),
			rgd->rd_igeneration,
			be64_to_cpu(jrgd->rg_igeneration));
		gfs2_dump_glock(NULL, rgd->rd_gl, true);
	}
}
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
				obsolete_rgrp(jd, bh_log, blkno);

			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_flush_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
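/*
 * Revokes are written as a packed array of big-endian block numbers: the
 * first log block starts with a GFS2_LOG_DESC_REVOKE descriptor and each
 * continuation block with a smaller GFS2_METATYPE_LB header.  A revoke tells
 * replay that any earlier log copy of that block is stale and must not be
 * written back.
 */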
void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_drain_revokes(sdp);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}
/* FIXME: sort out accounting for log blocks etc. */
static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}
static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};
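/*
 * gfs2_log_ops is a NULL-terminated table: the lops_before_commit(),
 * lops_after_commit() and journal-recovery helpers in lops.h walk it until
 * they hit the NULL sentinel, calling whichever hooks each entry provides.
 */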