/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
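/*
 * Illustrative example (not from the original source): with 512-byte
 * buffer heads on a 4096-byte page, a call such as
 *
 *	gfs2_page_add_databufs(ip, page, 1000, 3000);
 *
 * walks buffers covering [0,512), [512,1024), ... and the overlap test
 * (end <= from || start >= to) skips buffers 0, 6 and 7, so only the
 * five buffers intersecting byte range [1000, 3000) are added to the
 * current transaction.
 */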
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, create, bh_result);
}
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, 0, bh_result);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, 0, bh_result);
}
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if ((sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) &&
	    PageChecked(page)) {
		ClearPageChecked(page);
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For journaled files and/or ordered writes this just falls back to the
 * kernel's default writepages path for now. We will probably want to change
 * that eventually (i.e. when we look at allocate on flush).
 *
 * For the data=writeback case though we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
		return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	return generic_writepages(mapping, wbc);
}
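/*
 * Usage note (illustrative, not from the original source): the fast path
 * above is selected by the "data=" mount option, e.g.
 *
 *	mount -t gfs2 -o data=writeback /dev/my_vg/gfs2_lv /mnt/gfs2
 *
 * sets GFS2_DATA_WRITEBACK, so whole extents go through
 * mpage_writepages(); the default data=ordered stays on the
 * generic_writepages() path.
 */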
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->nopage(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
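/*
 * Worked example (illustrative, not from the original source): a stuffed
 * file keeps its data in the dinode block directly after the on-disk
 * header, so with a 4096-byte block it can hold at most
 *
 *	4096 - sizeof(struct gfs2_dinode)
 *
 * bytes; stuffed_readpage() copies exactly ip->i_di.di_size of them into
 * page 0 and zero-fills the rest of the page.
 */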
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct gfs2_file *gf = NULL;
	struct gfs2_holder gh;
	int error;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				/* gfs2_sharewrite_fault has grabbed the ip->i_gl already */
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
		do_unlock = 1;
		error = gfs2_glock_nq_atime(&gh);
		if (unlikely(error))
			goto out_unlock;
	}

skip_lock:
	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	return error;
out_unlock:
	unlock_page(page);
	if (error == GLR_TRYFAILED) {
		error = AOP_TRUNCATED_PAGE;
		yield();
	}
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret = 0;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			struct gfs2_file *gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
		do_unlock = 1;
		ret = gfs2_glock_nq_atime(&gh);
		if (ret == GLR_TRYFAILED)
			goto out_noerror;
		if (unlikely(ret))
			goto out_unlock;
	}
skip_lock:
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
out_noerror:
	ret = 0;
out_unlock:
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
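/*
 * Illustrative note (an assumption, not from the original source):
 * LM_FLAG_TRY_1CB is a try-lock that still issues a single callback to
 * a conflicting holder, e.g.
 *
 *	ret = gfs2_glock_nq_atime(&gh);
 *	if (ret == GLR_TRYFAILED)
 *		goto out_noerror;	- skip the I/O, but the remote
 *					  node has been asked to demote -
 *
 * so by the time ->readpage retries, the shared glock is likely free;
 * this is the "lock-ahead" effect note 5 above refers to.
 */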
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	error = -ENOMEM;
	page = __grab_cache_page(mapping, index);
	*pagep = page;
	if (!page)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_putpage;

	ip->i_alloc.al_requested = 0;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

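	/*
	 * Worked example (illustrative, not from the original source):
	 * a small write to a jdata file that needs one data block and
	 * one indirect block reserves
	 *
	 *	rblocks = RES_DINODE + 1 + 1 + RES_STATFS + RES_QUOTA
	 *
	 * blocks of journal space in the transaction started below.
	 */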
	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	if (gfs2_is_stuffed(ip)) {
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page))
			error = stuffed_readpage(ip, page);
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_get_block);

out:
	if (error) {
		gfs2_trans_end(sdp);
out_trans_fail:
		if (alloc_required) {
			gfs2_inplace_release(ip);
out_qunlock:
			gfs2_quota_unlock(ip);
out_alloc_put:
			gfs2_alloc_put(ip);
		}
out_putpage:
		page_cache_release(page);
		if (pos + len > ip->i_inode.i_size)
			vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_unlock:
		gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
		gfs2_holder_uninit(&ip->i_gh);
	}

	return error;
}
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}
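/*
 * Worked example (illustrative, not from the original source): if
 * gfs2_ri_total() now reports 1100000 blocks while the master and local
 * statfs changes sum to 1000000, new_free becomes 100000, the fs_warn()
 * above prints "File system extended by 100000 blocks." and
 * gfs2_statfs_change() credits them as both total and free blocks.
 */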
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 */

static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
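/*
 * Illustrative layout (not from the original source): during a stuffed
 * write_end the dinode block is
 *
 *	dibh->b_data: [ struct gfs2_dinode | file data ... ]
 *	              ^ di                  ^ buf
 *
 * so memcpy(buf + pos, kaddr + pos, copied) places the page bytes at the
 * matching file offset inside the inode block itself.
 */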
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 */
static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0)) {
		copied = ret;
		if ((pos + copied) > inode->i_size) {
			di = (struct gfs2_dinode *)dibh->b_data;
			ip->i_di.di_size = inode->i_size;
			di->di_size = cpu_to_be64(inode->i_size);
			mark_inode_dirty(inode);
		}
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}
/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
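/*
 * Usage sketch (illustrative, not from the original source): ->bmap is
 * reached from user space via the FIBMAP ioctl, e.g.
 *
 *	int blk = 0;			- logical block to translate -
 *	if (ioctl(fd, FIBMAP, &blk) == 0)
 *		printf("disk block %d\n", blk);
 *
 * A result of 0 means a hole, an error, or a stuffed file, since the
 * stuffed case above leaves dblock at 0.
 */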
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}
/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a journaled file makes any sense. For now we'll silently fall
	 * back to buffered I/O, likewise we do the same for stuffed
	 * files since they are (a) small and (b) unaligned.
	 */
	if (gfs2_is_jdata(ip))
		return 0;

	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset > i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
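/*
 * Usage sketch (illustrative, not from the original source): I/O reaches
 * this path only when the file is opened with O_DIRECT and the buffer is
 * suitably aligned, e.g. in user space
 *
 *	int fd = open("/mnt/gfs2/file", O_RDONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	read(fd, buf, 4096);
 *
 * Journaled (jdata) and stuffed files fail gfs2_ok_for_dio() and fall
 * back to buffered I/O silently.
 */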
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}
const struct address_space_operations gfs2_file_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
};