/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>
26 #include "gfs2.h"
27 #include "incore.h"
28 #include "bmap.h"
29 #include "glock.h"
30 #include "inode.h"
31 #include "log.h"
32 #include "meta_io.h"
33 #include "ops_address.h"
34 #include "quota.h"
35 #include "trans.h"
36 #include "rgrp.h"
37 #include "super.h"
38 #include "util.h"
39 #include "glops.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

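/*
 * gfs2_get_block_direct - Block lookup for the O_DIRECT path. Unlike
 * gfs2_get_block_noalloc(), an unmapped result is not treated as an error;
 * no allocation is performed in either case.
 */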
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret = -EIO;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	ret = 0;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
	if (ret == -EAGAIN)
		ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	return ret;
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;
	int done_trans = 0;

	error = gfs2_writepage_common(page, wbc);
	if (error <= 0)
		return error;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		done_trans = 1;
	}
	error = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider (for range writeback)
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, 0);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			ret = 1;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->nopage(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We use a try lock in order to
 * avoid the page lock / glock ordering problems, returning AOP_TRUNCATED_PAGE
 * in the event that we are unable to get the lock.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (unlikely(error)) {
		unlock_page(page);
		goto out;
	}
	error = __gfs2_readpage(file, page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error == GLR_TRYFAILED) {
		yield();
		return AOP_TRUNCATED_PAGE;
	}
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything which
 *    is slightly inconvenient (such as locking conflicts between the page
 *    lock and the glock) and return having done no I/O. It's obviously not
 *    something we'd want to do on too regular a basis. Any I/O we ignore
 *    at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	ret = gfs2_glock_nq_atime(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	page = __grab_cache_page(mapping, index);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different; otherwise we just
 * put our locking around the VFS-provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
		di = (struct gfs2_dinode *)dibh->b_data;
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

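/*
 * gfs2_discard - Detach a buffer from the log before it is invalidated.
 * If the buffer sits on a log element list and is not pinned, it is simply
 * unlinked; otherwise it is removed via gfs2_remove_from_journal(). The
 * buffer is then unmapped so that it will not be written back.
 */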
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

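/*
 * gfs2_invalidatepage - Called when part or all of the page is removed from
 * the page cache (e.g. on truncate). Buffers starting at or beyond @offset
 * are discarded from the log; a full-page invalidation (offset == 0) also
 * clears PageChecked and attempts to release the page.
 */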
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset > i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

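/*
 * gfs2_direct_IO - Direct I/O entry point for the O_DIRECT path. The glock
 * is taken in deferred mode (see the comment in the function body); stuffed
 * files and I/O beyond the end of file fall back to buffered I/O via
 * gfs2_ok_for_dio().
 */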
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
};

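/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * The operations are chosen according to the inode's data journaling
 * mode: writeback, ordered or jdata.
 */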
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}