/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/aio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}
/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */
static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}
/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;
        int done_trans = 0;

        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
                ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (ret)
                        goto out_ignore;
                done_trans = 1;
        }
        ret = gfs2_writepage_common(page, wbc);
        if (ret > 0)
                ret = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider (when not range_cyclic)
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
        unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        continue;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        ret = 1;
                        unlock_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                    !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        continue;
                }

                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
                        page->mapping->a_ops->invalidatepage(page, 0,
                                                             PAGE_CACHE_SIZE);
                        unlock_page(page);
                        continue;
                }

                ret = __gfs2_jdata_writepage(page, wbc);

                if (ret || (--(wbc->nr_to_write) <= 0))
                        ret = 1;
        }
        gfs2_trans_end(sdp);
        return ret;
}
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */
static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;
        int scanned = 0;
        int range_whole = 0;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }

retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                scanned = 1;
                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;

                pagevec_release(&pvec);
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}
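
/*
 * A sketch (editorial, not part of the original file) of the ordering the
 * comment above describes, reduced to its essentials. Unlike
 * write_cache_pages(), which locks a page and only then calls into the
 * filesystem, here the journal space is reserved once per pagevec before
 * any page lock is taken:
 *
 *	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);   // transaction first
 *	for (i = 0; i < nr_pages; i++) {
 *		lock_page(pvec->pages[i]);                 // page locks second
 *		ret = __gfs2_jdata_writepage(pvec->pages[i], wbc);
 *	}
 *	gfs2_trans_end(sdp);
 */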
/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */
static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page);
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}
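
/*
 * Worked illustration of the zero-page case above (assumed geometry:
 * 4KiB blocks): a stuffed file holds at most
 * sb_bsize - sizeof(struct gfs2_dinode) bytes, all of them in the dinode
 * block, so only page index 0 can ever contain real data:
 *
 *	index == 0: memcpy() from dibh->b_data + sizeof(struct gfs2_dinode)
 *	index >= 1: zero_user(page, 0, PAGE_CACHE_SIZE), report uptodate
 */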
/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */
static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        return error;
}
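
/*
 * Because __gfs2_readpage() assumes the glock is already held, it can be
 * handed straight to read_cache_page() as the filler; that is exactly how
 * gfs2_internal_read() below pulls pages of an internal file into the
 * page cache:
 *
 *	page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
 */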
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder gh;
        int error;

        unlock_page(page);
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (unlikely(error))
                goto out;
        error = AOP_TRUNCATED_PAGE;
        lock_page(page);
        if (page->mapping == mapping && !PageUptodate(page))
                error = __gfs2_readpage(file, page);
        else
                unlock_page(page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error && error != AOP_TRUNCATED_PAGE)
                lock_page(page);
        return error;
}
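
/*
 * In outline (a sketch of the code above): the VFS calls ->readpage with
 * the page locked, but the glock must be taken before the page lock, so
 * the page lock is dropped and retaken:
 *
 *	unlock_page(page);              // give back the VFS's page lock
 *	gfs2_glock_nq(&gh);             // glock first
 *	lock_page(page);                // then the page lock again
 *
 * The page may have been truncated in that window, which is what the
 * AOP_TRUNCATED_PAGE return code tells the VFS to handle by retrying.
 */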
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data actually copied or the error
 */
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_CACHE_SIZE;
        unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_CACHE_SIZE)
                        amt = PAGE_CACHE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                page_cache_release(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);

        return size;
}
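
/*
 * Example use (a sketch; the rindex reader in rgrp.c follows this pattern,
 * and the variable names here are illustrative only): pull one fixed-size
 * record out of an internal inode.
 *
 *	struct gfs2_rindex ri;
 *	loff_t pos = rgd_index * sizeof(struct gfs2_rindex);
 *	int copied = gfs2_internal_read(ip, (char *)&ri, &pos,
 *					sizeof(struct gfs2_rindex));
 *	if (copied != sizeof(struct gfs2_rindex))
 *		return (copied < 0) ? copied : -EIO;
 */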
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        unsigned requested = 0;
        int alloc_required;
        int error = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
        if (&ip->i_inode == sdp->sd_rindex) {
                error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &m_ip->i_gh);
                if (unlikely(error)) {
                        gfs2_glock_dq(&ip->i_gh);
                        goto out_uninit;
                }
        }

        alloc_required = gfs2_write_alloc_required(ip, pos, len);

        if (alloc_required || gfs2_is_jdata(ip))
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

        if (alloc_required) {
                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_unlock;

                requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip, requested, 0);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
        if (alloc_required)
                rblocks += gfs2_rg_blocks(ip, requested);

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        flags |= AOP_FLAG_NOFS;
        page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = __block_write_begin(page, from, len, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        unlock_page(page);
        page_cache_release(page);

        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
                gfs2_trim_blocks(&ip->i_inode);
        goto out_trans_fail;

out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
        }
out_unlock:
        if (&ip->i_inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}
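
/*
 * Worked example of the rblocks sum above (assumed case: a jdata inode
 * with 4KiB blocks, writing data that allocates one data block through
 * one new indirect block):
 *
 *	rblocks = RES_DINODE + 1            // dinode + the indirect block
 *	        + 1                         // jdata: the data block itself
 *	        + RES_STATFS + RES_QUOTA    // the allocation touches both
 *	        + gfs2_rg_blocks(ip, 2);    // rgrp header/bitmap blocks
 */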
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                return;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out:
        brelse(m_bh);
}
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);

        if (copied) {
                if (inode->i_size < to)
                        i_size_write(inode, to);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}
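
/*
 * The layout the memcpy()/memset() pair above relies on: in a stuffed
 * inode the file data sits in the dinode block directly after the
 * on-disk inode header, so file byte `pos' lives at
 *
 *	dibh->b_data + sizeof(struct gfs2_dinode) + pos
 *
 * which is why the BUG_ON() insists that pos + len fits within
 * b_size - sizeof(struct gfs2_dinode).
 */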
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */
static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;
        int ret;
        struct gfs2_trans *tr = current->journal_info;
        BUG_ON(!tr);

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                page_cache_release(page);
                goto failed;
        }

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (tr->tr_num_buf_new)
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        else
                gfs2_trans_add_meta(ip->i_gl, dibh);

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
failed:
        gfs2_trans_end(sdp);
        gfs2_inplace_release(ip);
        if (ip->i_res->rs_qa_qd_num)
                gfs2_quota_unlock(ip);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}
/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}
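
/*
 * PageChecked doubles as a "needs a transaction" marker here: a page
 * dirtied through this path (e.g. via mmap, where no write_begin/write_end
 * pair brackets the change) is caught later by gfs2_jdata_writepage(),
 * which sees the flag and opens the transaction itself:
 *
 *	if (PageChecked(page)) {                         // set here
 *		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); // paid there
 *		...
 *	}
 */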
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else
                        gfs2_remove_from_journal(bh, current->journal_info, 0);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
        int partial_page = (offset || length < PAGE_CACHE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                try_to_release_page(page, 0);
}
/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
}
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct address_space *mapping = inode->i_mapping;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if its a write, since we do no allocation
         * on this path. All we need change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        /*
         * Now since we are holding a deferred (CW) lock at this point, you
         * might be wondering why this is ever needed. There is a case however
         * where we've granted a deferred local lock against a cached exclusive
         * glock. That is ok provided all granted local locks are deferred, but
         * it also means that it is possible to encounter pages which are
         * cached and possibly also mapped. So here we check for that and sort
         * them out ahead of the dio. The glock state machine will take care of
         * everything else.
         *
         * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
         * the first place, mapping->nr_pages will always be zero.
         */
        if (mapping->nrpages) {
                loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
                loff_t len = iov_length(iov, nr_segs);
                loff_t end = PAGE_ALIGN(offset + len) - 1;

                rv = 0;
                if (len == 0)
                        goto out;
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
                rv = filemap_write_and_wait_range(mapping, lstart, end);
                if (rv)
                        goto out;
                truncate_inode_pages_range(mapping, lstart, end);
        }

        rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, gfs2_get_block_direct,
                                  NULL, NULL, 0);
out:
        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
        return rv;
}
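
/*
 * For reference, a sketch of why the deferred mode works (lock modes as
 * mapped onto DLM modes by lock_dlm; table reconstructed from the DLM
 * compatibility rules, not from this file):
 *
 *	            NL    CR    CW(deferred)  PR(shared)  EX(exclusive)
 *	CW          yes   yes   yes           no          no
 *
 * Deferred (CW) is incompatible with the shared (PR) and exclusive (EX)
 * modes used for buffered i/o, so acquiring it forces every other node
 * doing buffered i/o to demote its glock, and with it drop its cached
 * pages, while still allowing concurrent direct i/o from other nodes.
 */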
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_pinned(bh) || buffer_dirty(bh))
                        goto not_possible;
                bh = bh->b_this_page;
        } while(bh != head);
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        head = bh = page_buffers(page);
        do {
                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        if (!list_empty(&bd->bd_list)) {
                                if (!buffer_pinned(bh))
                                        list_del_init(&bd->bd_list);
                                else
                                        bd = NULL;
                        }
                        if (bd)
                                bd->bd_bh = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

        return try_to_free_buffers(page);

not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
cannot_release:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
}
static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};
void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}
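
/*
 * The three predicates used above come from the gfs2 headers; roughly
 * (paraphrased for reference, not verbatim):
 *
 *	gfs2_is_jdata(ip)     := ip->i_diskflags & GFS2_DIF_JDATA
 *	gfs2_is_writeback(ip) := data=writeback mount && !gfs2_is_jdata(ip)
 *	gfs2_is_ordered(ip)   := data=ordered mount   && !gfs2_is_jdata(ip)
 *
 * so a per-inode jdata flag overrides the mount-wide data= mode.
 */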