/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
18 #include "xfs.h"
19 #include "xfs_bit.h"
20 #include "xfs_log.h"
21 #include "xfs_inum.h"
22 #include "xfs_sb.h"
23 #include "xfs_ag.h"
24 #include "xfs_dir2.h"
25 #include "xfs_trans.h"
26 #include "xfs_dmapi.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dir2_sf.h"
32 #include "xfs_attr_sf.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_alloc.h"
36 #include "xfs_btree.h"
37 #include "xfs_error.h"
38 #include "xfs_rw.h"
39 #include "xfs_iomap.h"
40 #include "xfs_vnodeops.h"
41 #include "xfs_trace.h"
42 #include <linux/mpage.h>
43 #include <linux/pagevec.h>
44 #include <linux/writeback.h>
/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t *ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t *ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

void
xfs_count_page_state(
	struct page *page,
	int *delalloc,
	int *unmapped,
	int *unwritten)
{
	struct buffer_head *bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t *ioend)
{
	struct buffer_head *bh, *next;
	struct xfs_inode *ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t *ioend)
{
	xfs_inode_t *ip = XFS_I(ioend->io_inode);
	xfs_fsize_t isize;
	xfs_fsize_t bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

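/*
 * For example (illustrative numbers only): with an on-disk size
 * (ip->i_d.di_size) of 100k, an in-core size of 104k and an ioend covering
 * offset 100k with size 4k, bsize is 104k, isize becomes min(104k, 104k)
 * and 104k is returned as the new on-disk EOF.  An ioend that ends at or
 * below the on-disk size returns 0 and no size update is done.
 */
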
/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t *ioend)
{
	xfs_inode_t *ip = XFS_I(ioend->io_inode);
	xfs_fsize_t isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t *ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IOMAP_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
		int error;

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	if (ioend->io_type != IOMAP_READ)
		xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t *ioend,
	int wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq;

		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
			xfsconvertd_workqueue : xfsdatad_workqueue;
		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode *inode,
	unsigned int type)
{
	xfs_ioend_t *ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode *inode,
	loff_t offset,
	ssize_t count,
	xfs_iomap_t *mapp,
	int flags)
{
	int nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

STATIC int
xfs_iomap_valid(
	xfs_iomap_t *iomapp,
	loff_t offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio *bio,
	int error)
{
	xfs_ioend_t *ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t *ioend,
	struct bio *bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
		   WRITE_SYNC_PLUG : WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head *bh)
{
	struct bio *bio;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

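/*
 * Note on the allocation loop above: bio_alloc() can fail for a large
 * vector count under memory pressure, so the request is retried with the
 * vector count halved each time until an allocation succeeds.  The extra
 * reference taken by bio_get() keeps the bio alive across submit_bio();
 * the references are dropped again by the bio_put() calls in
 * xfs_submit_ioend_bio() and xfs_end_bio().
 */
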
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head *bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page *page,
	int clear_dirty,
	int buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t *ioend)
{
	xfs_ioend_t *head = ioend;
	xfs_ioend_t *next;
	struct buffer_head *bh;
	struct bio *bio;
	sector_t lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t *ioend)
{
	xfs_ioend_t *next;
	struct buffer_head *bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode *inode,
	struct buffer_head *bh,
	xfs_off_t offset,
	unsigned int type,
	xfs_ioend_t **result,
	int need_ioend)
{
	xfs_ioend_t *ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t *previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

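/*
 * Note: buffers belonging to an ioend are chained through bh->b_private
 * from io_buffer_head to io_buffer_tail, which is why b_private is
 * cleared on the newly appended buffer above.  A new ioend is started
 * whenever the mapping type changes or the caller asks for one
 * (need_ioend), and the ioends themselves are chained via io_list for
 * xfs_submit_ioend() to walk.
 */
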
STATIC void
xfs_map_buffer(
	struct buffer_head *bh,
	xfs_iomap_t *mp,
	xfs_off_t offset,
	uint block_bits)
{
	sector_t bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head *bh,
	loff_t offset,
	int block_bits,
	xfs_iomap_t *iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page *page,
	unsigned int pg_offset,
	int mapped)
{
	int ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

STATIC size_t
xfs_probe_cluster(
	struct inode *inode,
	struct page *startpage,
	struct buffer_head *bh,
	struct buffer_head *head,
	int mapped)
{
	struct pagevec pvec;
	pgoff_t tindex, tlast, tloff;
	size_t total = 0;
	int done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
					i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

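/*
 * In other words, the probe walks forward from the current buffer,
 * counting contiguous uptodate buffers whose mapped state matches the one
 * we are writing, then keeps going across following dirty pages (capped
 * at 64 pages beyond the start page) so that a single xfs_map_blocks()
 * call can cover the whole run.
 */
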
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page *page,
	unsigned int type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

712 * Allocate & map buffers for page given the extent map. Write it out.
713 * except for the original page of a writepage, this is called on
714 * delalloc/unwritten pages only, for the original page it is possible
715 * that the page has no mapping at all.
717 STATIC int
718 xfs_convert_page(
719 struct inode *inode,
720 struct page *page,
721 loff_t tindex,
722 xfs_iomap_t *mp,
723 xfs_ioend_t **ioendp,
724 struct writeback_control *wbc,
725 int startio,
726 int all_bh)
728 struct buffer_head *bh, *head;
729 xfs_off_t end_offset;
730 unsigned long p_offset;
731 unsigned int type;
732 int bbits = inode->i_blkbits;
733 int len, page_dirty;
734 int count = 0, done = 0, uptodate = 1;
735 xfs_off_t offset = page_offset(page);
737 if (page->index != tindex)
738 goto fail;
739 if (!trylock_page(page))
740 goto fail;
741 if (PageWriteback(page))
742 goto fail_unlock_page;
743 if (page->mapping != inode->i_mapping)
744 goto fail_unlock_page;
745 if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
746 goto fail_unlock_page;
749 * page_dirty is initially a count of buffers on the page before
750 * EOF and is decremented as we move each into a cleanable state.
752 * Derivation:
754 * End offset is the highest offset that this page should represent.
755 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
756 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
757 * hence give us the correct page_dirty count. On any other page,
758 * it will be zero and in that case we need page_dirty to be the
759 * count of buffers on the page.
761 end_offset = min_t(unsigned long long,
762 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
763 i_size_read(inode));
765 len = 1 << inode->i_blkbits;
766 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
767 PAGE_CACHE_SIZE);
768 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
769 page_dirty = p_offset / len;
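	/*
	 * For example (illustrative numbers): with 4k pages and 1k blocks,
	 * a file whose size ends 1600 bytes into this page gives p_offset =
	 * roundup(1600, 1024) = 2048, so page_dirty = 2 buffers lie before
	 * EOF.  On a page entirely before EOF, p_offset is PAGE_CACHE_SIZE
	 * and page_dirty is the full buffer count.
	 */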

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			wbc->nr_to_write--;
			if (wbc->nr_to_write <= 0)
				done = 1;
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

841 * Convert & write out a cluster of pages in the same extent as defined
842 * by mp and following the start page.
844 STATIC void
845 xfs_cluster_write(
846 struct inode *inode,
847 pgoff_t tindex,
848 xfs_iomap_t *iomapp,
849 xfs_ioend_t **ioendp,
850 struct writeback_control *wbc,
851 int startio,
852 int all_bh,
853 pgoff_t tlast)
855 struct pagevec pvec;
856 int done = 0, i;
858 pagevec_init(&pvec, 0);
859 while (!done && tindex <= tlast) {
860 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
862 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
863 break;
865 for (i = 0; i < pagevec_count(&pvec); i++) {
866 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
867 iomapp, ioendp, wbc, startio, all_bh);
868 if (done)
869 break;
872 pagevec_release(&pvec);
873 cond_resched();
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set we are
 * coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state cannot tell us which blocks, if any, are dirty due to
 * mmap writes, and therefore bh uptodate is only valid if the page itself
 * isn't completely uptodate.  Some layers may clear the page dirty flag
 * prior to calling writepage, under the assumption the entire page will
 * be written out; by not writing out the whole page the page can be
 * reused before all valid dirty data is written out.  Note: in the case
 * of a page that has been dirtied by mmap write but only partially set up
 * by block_prepare_write, the buffer states will not agree and only the
 * ones set up by block_prepare_write/block_commit_write will have valid
 * state, so the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode *inode,
	struct page *page,
	struct writeback_control *wbc,
	int startio,
	int unmapped) /* also implies page uptodate */
{
	struct buffer_head *bh, *head;
	xfs_iomap_t iomap;
	xfs_ioend_t *ioend = NULL, *iohead = NULL;
	loff_t offset;
	unsigned long p_offset = 0;
	unsigned int type;
	__uint64_t end_offset;
	pgoff_t end_index, last_index, tlast;
	ssize_t size, len;
	int flags, err, iomap_valid = 0, uptodate = 1;
	int page_dirty, count = 0;
	int trylock = 0;
	int all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page *page,
	struct writeback_control *wbc)
{
	int error;
	int need_trans;
	int delalloc, unmapped, unwritten;
	struct inode *inode = page->mapping->host;

	trace_xfs_writepage(inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * VM calculation for nr_to_write seems off.  Bump it way
	 * up, this gets simple streaming writes zippy again.
	 * To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
xfs_vm_writepages(
	struct address_space *mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page *page,
	gfp_t gfp_mask)
{
	struct inode *inode = page->mapping->host;
	int dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	trace_xfs_releasepage(inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	int create,
	int direct,
	bmapi_flags_t flags)
{
	xfs_iomap_t iomap;
	xfs_off_t offset;
	ssize_t size;
	int niomap = 1;
	int error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	int create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	int create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb *iocb,
	loff_t offset,
	ssize_t size,
	void *private)
{
	xfs_ioend_t *ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		ioend->io_type = IOMAP_NEW;
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int rw,
	struct kiocb *iocb,
	const struct iovec *iov,
	loff_t offset,
	unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
					IOMAP_UNWRITTEN : IOMAP_READ);

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct);

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_write_begin(
	struct file *file,
	struct address_space *mapping,
	loff_t pos,
	unsigned len,
	unsigned flags,
	struct page **pagep,
	void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space *mapping,
	sector_t block)
{
	struct inode *inode = (struct inode *)mapping->host;
	struct xfs_inode *ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file *unused,
	struct page *page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file *unused,
	struct address_space *mapping,
	struct list_head *pages,
	unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page *page,
	unsigned long offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};