Linux 3.12.39
[linux/fpc-iii.git] / fs/gfs2/aops.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/aio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

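/*
 * Editorial note: gfs2_page_add_databufs() below adds every buffer of @page
 * that overlaps the byte range [@from, @to) to the current transaction; for
 * jdata inodes the buffers are also marked uptodate, presumably because
 * their contents are about to be journaled.
 */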
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

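/*
 * Like gfs2_get_block_noalloc(), gfs2_get_block_direct() never allocates:
 * the @create flag is ignored and gfs2_block_map() is called with create == 0.
 * It serves as the get_block callback for the direct I/O path further down.
 */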
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;
        int done_trans = 0;

        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
                ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (ret)
                        goto out_ignore;
                done_trans = 1;
        }
        ret = gfs2_writepage_common(page, wbc);
        if (ret > 0)
                ret = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @writepage: The writepage function to call for each page
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
        unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        continue;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        ret = 1;
                        unlock_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                    !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        continue;
                }

                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
                        page->mapping->a_ops->invalidatepage(page, 0,
                                                             PAGE_CACHE_SIZE);
                        unlock_page(page);
                        continue;
                }

                ret = __gfs2_jdata_writepage(page, wbc);

                if (ret || (--(wbc->nr_to_write) <= 0))
                        ret = 1;
        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 * @writepage: The writepage function to call
 * @data: The data to pass to writepage
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;
        int scanned = 0;
        int range_whole = 0;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }

retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                scanned = 1;
                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;

                pagevec_release(&pvec);
                cond_resched();
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page);
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It is used by the internal file
 * reading code, as in that case we already hold the glock. It is also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder gh;
        int error;

        unlock_page(page);
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (unlikely(error))
                goto out;
        error = AOP_TRUNCATED_PAGE;
        lock_page(page);
        if (page->mapping == mapping && !PageUptodate(page))
                error = __gfs2_readpage(file, page);
        else
                unlock_page(page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error && error != AOP_TRUNCATED_PAGE)
                lock_page(page);
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_CACHE_SIZE;
        unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_CACHE_SIZE)
                        amt = PAGE_CACHE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                page_cache_release(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        unsigned requested = 0;
        int alloc_required;
        int error = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
        if (&ip->i_inode == sdp->sd_rindex) {
                error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &m_ip->i_gh);
                if (unlikely(error)) {
                        gfs2_glock_dq(&ip->i_gh);
                        goto out_uninit;
                }
        }

        alloc_required = gfs2_write_alloc_required(ip, pos, len);

        if (alloc_required || gfs2_is_jdata(ip))
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

        if (alloc_required) {
                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_unlock;

                requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip, requested, 0);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
        if (alloc_required)
                rblocks += gfs2_rg_blocks(ip, requested);

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        flags |= AOP_FLAG_NOFS;
        page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = __block_write_begin(page, from, len, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        unlock_page(page);
        page_cache_release(page);

        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
                gfs2_trim_blocks(&ip->i_inode);
        goto out_trans_fail;

out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
        }
out_unlock:
        if (&ip->i_inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                return;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out:
        brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);

        if (copied) {
                if (inode->i_size < to)
                        i_size_write(inode, to);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied:
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;
        int ret;
        struct gfs2_trans *tr = current->journal_info;
        BUG_ON(!tr);

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                page_cache_release(page);
                goto failed;
        }

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (tr->tr_num_buf_new)
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        else
                gfs2_trans_add_meta(ip->i_gl, dibh);

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
failed:
        gfs2_trans_end(sdp);
        gfs2_inplace_release(ip);
        if (ip->i_res->rs_qa_qd_num)
                gfs2_quota_unlock(ip);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

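/*
 * gfs2_discard() detaches @bh from the journal when the page range backing
 * it is being invalidated: it is taken off its log list where that is safe,
 * otherwise handed to gfs2_remove_from_journal(), and its buffer state bits
 * are cleared.
 */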
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else
                        gfs2_remove_from_journal(bh, current->journal_info, 0);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

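/*
 * ->invalidatepage for all three GFS2 aops tables: buffers that fall wholly
 * within the invalidated byte range are discarded from the journal, and if
 * the whole page is being invalidated its buffers are released as well.
 */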
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
        int partial_page = (offset || length < PAGE_CACHE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
}

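/*
 * ->direct_IO for the writeback and ordered aops (jdata has none). A deferred
 * glock is taken; stuffed files and I/O starting at or beyond EOF fall back
 * to buffered I/O via gfs2_ok_for_dio(); any cached pages covering the range
 * are written back and truncated; the rest is left to __blockdev_direct_IO().
 */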
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct address_space *mapping = inode->i_mapping;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need to change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        /*
         * Now since we are holding a deferred (CW) lock at this point, you
         * might be wondering why this is ever needed. There is a case however
         * where we've granted a deferred local lock against a cached exclusive
         * glock. That is ok provided all granted local locks are deferred, but
         * it also means that it is possible to encounter pages which are
         * cached and possibly also mapped. So here we check for that and sort
         * them out ahead of the dio. The glock state machine will take care of
         * everything else.
         *
         * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
         * the first place, mapping->nrpages will always be zero.
         */
        if (mapping->nrpages) {
                loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
                loff_t len = iov_length(iov, nr_segs);
                loff_t end = PAGE_ALIGN(offset + len) - 1;

                rv = 0;
                if (len == 0)
                        goto out;
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
                rv = filemap_write_and_wait_range(mapping, lstart, end);
                if (rv)
                        return rv;
                truncate_inode_pages_range(mapping, lstart, end);
        }

        rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, gfs2_get_block_direct,
                                  NULL, NULL, 0);
out:
        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_pinned(bh) || buffer_dirty(bh))
                        goto not_possible;
                bh = bh->b_this_page;
        } while(bh != head);
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        head = bh = page_buffers(page);
        do {
                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        if (!list_empty(&bd->bd_list)) {
                                if (!buffer_pinned(bh))
                                        list_del_init(&bd->bd_list);
                                else
                                        bd = NULL;
                        }
                        if (bd)
                                bd->bd_bh = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

        return try_to_free_buffers(page);

not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
cannot_release:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
}

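/*
 * One set of address_space operations per data journaling mode. Only the
 * ordered and jdata variants install gfs2_set_page_dirty (which tags dirtied
 * pages with PageChecked), and the jdata variant has no ->direct_IO.
 */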
static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

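/*
 * gfs2_set_aops() selects the address_space operations for @inode according
 * to its data journaling mode: writeback, ordered, or jdata.
 */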
void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}