/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

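/*
 * gfs2_page_add_databufs - walk the page's buffers and, for each buffer that
 * overlaps the byte range [from, to), mark it uptodate (jdata inodes only)
 * and add it to the current transaction as journaled data.
 */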
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 */
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

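/*
 * gfs2_get_block_direct - get_block callback used for direct I/O.  A thin
 * wrapper around gfs2_block_map() that only looks blocks up and never
 * allocates; the create argument is ignored.
 */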
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */
static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 */
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */
static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */
static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);

	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 */
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 */
static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;
	BUG_ON(!tr);

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

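/*
 * gfs2_discard - detach a single buffer from the journal: clear its dirty
 * state, drop its bufdata from the relevant journal list and reset the
 * buffer's mapped/req/new bits.  Called under the page lock from
 * gfs2_invalidatepage().
 */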
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

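/*
 * gfs2_invalidatepage - invalidate the byte range [offset, offset + length)
 * of the page: buffers lying entirely inside that range are discarded from
 * the journal, and when the whole page is invalidated we also try to
 * release its remaining buffers.
 */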
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (pos >= offset)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

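/*
 * gfs2_direct_IO - perform O_DIRECT reads and writes under a deferred glock.
 * Returning 0 (for example when gfs2_ok_for_dio() rejects the request) makes
 * the caller fall back to buffered I/O.
 */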
static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nr_pages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared.  Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

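/*
 * Three flavours of address_space operations, matching GFS2's three data
 * journaling modes; gfs2_set_aops() below installs the right one for an
 * inode.
 */
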
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

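/*
 * gfs2_set_aops - install the address_space operations that correspond to
 * the inode's data journaling mode (writeback, ordered or jdata).
 */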
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}