mm/filemap.c, from the linux-next tree of 20110426 (linux-2.6/next.git)
1 /*
2 * linux/mm/filemap.c
4 * Copyright (C) 1994-1999 Linus Torvalds
5 */
7 /*
8 * This file handles the generic file mmap semantics used by
9 * most "normal" filesystems (but you don't /have/ to use this:
10 * the NFS filesystem used to do this differently, for example)
12 #include <linux/module.h>
13 #include <linux/compiler.h>
14 #include <linux/fs.h>
15 #include <linux/uaccess.h>
16 #include <linux/aio.h>
17 #include <linux/capability.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/gfp.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/mman.h>
23 #include <linux/pagemap.h>
24 #include <linux/file.h>
25 #include <linux/uio.h>
26 #include <linux/hash.h>
27 #include <linux/writeback.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
31 #include <linux/security.h>
32 #include <linux/syscalls.h>
33 #include <linux/cpuset.h>
34 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
35 #include <linux/memcontrol.h>
36 #include <linux/mm_inline.h> /* for page_is_file_cache() */
37 #include <linux/cleancache.h>
38 #include "internal.h"
41 * FIXME: remove all knowledge of the buffer layer from the core VM
43 #include <linux/buffer_head.h> /* for try_to_free_buffers */
45 #include <asm/mman.h>
48 * Shared mappings implemented 30.11.1994. It's not fully working yet,
49 * though.
51 * Shared mappings now work. 15.8.1995 Bruno.
53 * finished 'unifying' the page and buffer cache and SMP-threaded the
54 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
56 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
60 * Lock ordering:
62 * ->i_mmap_lock (truncate_pagecache)
63 * ->private_lock (__free_pte->__set_page_dirty_buffers)
64 * ->swap_lock (exclusive_swap_page, others)
65 * ->mapping->tree_lock
67 * ->i_mutex
68 * ->i_mmap_lock (truncate->unmap_mapping_range)
70 * ->mmap_sem
71 * ->i_mmap_lock
72 * ->page_table_lock or pte_lock (various, mainly in memory.c)
73 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
75 * ->mmap_sem
76 * ->lock_page (access_process_vm)
78 * ->i_mutex (generic_file_buffered_write)
79 * ->mmap_sem (fault_in_pages_readable->do_page_fault)
81 * ->i_mutex
82 * ->i_alloc_sem (various)
84 * inode_wb_list_lock
85 * sb_lock (fs/fs-writeback.c)
86 * ->mapping->tree_lock (__sync_single_inode)
88 * ->i_mmap_lock
89 * ->anon_vma.lock (vma_adjust)
91 * ->anon_vma.lock
92 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
94 * ->page_table_lock or pte_lock
95 * ->swap_lock (try_to_unmap_one)
96 * ->private_lock (try_to_unmap_one)
97 * ->tree_lock (try_to_unmap_one)
98 * ->zone.lru_lock (follow_page->mark_page_accessed)
99 * ->zone.lru_lock (check_pte_range->isolate_lru_page)
100 * ->private_lock (page_remove_rmap->set_page_dirty)
101 * ->tree_lock (page_remove_rmap->set_page_dirty)
102 * inode_wb_list_lock (page_remove_rmap->set_page_dirty)
103 * ->inode->i_lock (page_remove_rmap->set_page_dirty)
104 * inode_wb_list_lock (zap_pte_range->set_page_dirty)
105 * ->inode->i_lock (zap_pte_range->set_page_dirty)
106 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
108 * (code doesn't rely on that order, so you could switch it around)
109 * ->tasklist_lock (memory_failure, collect_procs_ao)
110 * ->i_mmap_lock
114 * Delete a page from the page cache and free it. Caller has to make
115 * sure the page is locked and that nobody else uses it - or that usage
116 * is safe. The caller must hold the mapping's tree_lock.
118 void __delete_from_page_cache(struct page *page)
120 struct address_space *mapping = page->mapping;
123 * if we're uptodate, flush out into the cleancache, otherwise
124 * invalidate any existing cleancache entries. We can't leave
125 * stale data around in the cleancache once our page is gone
127 if (PageUptodate(page) && PageMappedToDisk(page))
128 cleancache_put_page(page);
129 else
130 cleancache_flush_page(mapping, page);
132 radix_tree_delete(&mapping->page_tree, page->index);
133 page->mapping = NULL;
134 mapping->nrpages--;
135 __dec_zone_page_state(page, NR_FILE_PAGES);
136 if (PageSwapBacked(page))
137 __dec_zone_page_state(page, NR_SHMEM);
138 BUG_ON(page_mapped(page));
141 * Some filesystems seem to re-dirty the page even after
142 * the VM has canceled the dirty bit (eg ext3 journaling).
144 * Fix it up by doing a final dirty accounting check after
145 * having removed the page entirely.
147 if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
148 dec_zone_page_state(page, NR_FILE_DIRTY);
149 dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
154 * delete_from_page_cache - delete page from page cache
155 * @page: the page which the kernel is trying to remove from page cache
157 * This must be called only on pages that have been verified to be in the page
158 * cache and locked. It will never put the page into the free list, the caller
159 * has a reference on the page.
161 void delete_from_page_cache(struct page *page)
163 struct address_space *mapping = page->mapping;
164 void (*freepage)(struct page *);
166 BUG_ON(!PageLocked(page));
168 freepage = mapping->a_ops->freepage;
169 spin_lock_irq(&mapping->tree_lock);
170 __delete_from_page_cache(page);
171 spin_unlock_irq(&mapping->tree_lock);
172 mem_cgroup_uncharge_cache_page(page);
174 if (freepage)
175 freepage(page);
176 page_cache_release(page);
178 EXPORT_SYMBOL(delete_from_page_cache);
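/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * It shows the usual calling convention for delete_from_page_cache(): the
 * caller holds both the page lock and its own reference, and drops them
 * afterwards.  drop_one_page() is a hypothetical helper name.
 */
static void drop_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_lock_page(mapping, index); /* ref + lock */

	if (!page)
		return;
	if (page->mapping == mapping)		/* not truncated meanwhile */
		delete_from_page_cache(page);	/* drops the pagecache ref */
	unlock_page(page);
	page_cache_release(page);		/* drop our own reference */
}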
180 static int sleep_on_page(void *word)
182 io_schedule();
183 return 0;
186 static int sleep_on_page_killable(void *word)
188 sleep_on_page(word);
189 return fatal_signal_pending(current) ? -EINTR : 0;
193 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
194 * @mapping: address space structure to write
195 * @start: offset in bytes where the range starts
196 * @end: offset in bytes where the range ends (inclusive)
197 * @sync_mode: enable synchronous operation
199 * Start writeback against all of a mapping's dirty pages that lie
200 * within the byte offsets <start, end> inclusive.
202 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
203 * opposed to a regular memory cleansing writeback. The difference between
204 * these two operations is that if a dirty page/buffer is encountered, it must
205 * be waited upon, and not just skipped over.
207 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
208 loff_t end, int sync_mode)
210 int ret;
211 struct writeback_control wbc = {
212 .sync_mode = sync_mode,
213 .nr_to_write = LONG_MAX,
214 .range_start = start,
215 .range_end = end,
218 if (!mapping_cap_writeback_dirty(mapping))
219 return 0;
221 ret = do_writepages(mapping, &wbc);
222 return ret;
225 static inline int __filemap_fdatawrite(struct address_space *mapping,
226 int sync_mode)
228 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
231 int filemap_fdatawrite(struct address_space *mapping)
233 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
235 EXPORT_SYMBOL(filemap_fdatawrite);
237 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
238 loff_t end)
240 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
242 EXPORT_SYMBOL(filemap_fdatawrite_range);
245 * filemap_flush - mostly a non-blocking flush
246 * @mapping: target address_space
248 * This is a mostly non-blocking flush. Not suitable for data-integrity
249 * purposes - I/O may not be started against all dirty pages.
251 int filemap_flush(struct address_space *mapping)
253 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
255 EXPORT_SYMBOL(filemap_flush);
258 * filemap_fdatawait_range - wait for writeback to complete
259 * @mapping: address space structure to wait for
260 * @start_byte: offset in bytes where the range starts
261 * @end_byte: offset in bytes where the range ends (inclusive)
263 * Walk the list of under-writeback pages of the given address space
264 * in the given range and wait for all of them.
266 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
267 loff_t end_byte)
269 pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
270 pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
271 struct pagevec pvec;
272 int nr_pages;
273 int ret = 0;
275 if (end_byte < start_byte)
276 return 0;
278 pagevec_init(&pvec, 0);
279 while ((index <= end) &&
280 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
281 PAGECACHE_TAG_WRITEBACK,
282 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
283 unsigned i;
285 for (i = 0; i < nr_pages; i++) {
286 struct page *page = pvec.pages[i];
288 /* until radix tree lookup accepts end_index */
289 if (page->index > end)
290 continue;
292 wait_on_page_writeback(page);
293 if (TestClearPageError(page))
294 ret = -EIO;
296 pagevec_release(&pvec);
297 cond_resched();
300 /* Check for outstanding write errors */
301 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
302 ret = -ENOSPC;
303 if (test_and_clear_bit(AS_EIO, &mapping->flags))
304 ret = -EIO;
306 return ret;
308 EXPORT_SYMBOL(filemap_fdatawait_range);
311 * filemap_fdatawait - wait for all under-writeback pages to complete
312 * @mapping: address space structure to wait for
314 * Walk the list of under-writeback pages of the given address space
315 * and wait for all of them.
317 int filemap_fdatawait(struct address_space *mapping)
319 loff_t i_size = i_size_read(mapping->host);
321 if (i_size == 0)
322 return 0;
324 return filemap_fdatawait_range(mapping, 0, i_size - 1);
326 EXPORT_SYMBOL(filemap_fdatawait);
328 int filemap_write_and_wait(struct address_space *mapping)
330 int err = 0;
332 if (mapping->nrpages) {
333 err = filemap_fdatawrite(mapping);
335 * Even if the above returned an error, the pages may have
336 * been partially written (e.g. -ENOSPC), so we wait for them.
337 * But -EIO is a special case; it may indicate that the worst
338 * has happened (e.g. a bug), so we avoid waiting after -EIO.
340 if (err != -EIO) {
341 int err2 = filemap_fdatawait(mapping);
342 if (!err)
343 err = err2;
346 return err;
348 EXPORT_SYMBOL(filemap_write_and_wait);
351 * filemap_write_and_wait_range - write out & wait on a file range
352 * @mapping: the address_space for the pages
353 * @lstart: offset in bytes where the range starts
354 * @lend: offset in bytes where the range ends (inclusive)
356 * Write out and wait upon file offsets lstart->lend, inclusive.
358 * Note that `lend' is inclusive (describes the last byte to be written) so
359 * that this function can be used to write to the very end-of-file (end = -1).
361 int filemap_write_and_wait_range(struct address_space *mapping,
362 loff_t lstart, loff_t lend)
364 int err = 0;
366 if (mapping->nrpages) {
367 err = __filemap_fdatawrite_range(mapping, lstart, lend,
368 WB_SYNC_ALL);
369 /* See comment of filemap_write_and_wait() */
370 if (err != -EIO) {
371 int err2 = filemap_fdatawait_range(mapping,
372 lstart, lend);
373 if (!err)
374 err = err2;
377 return err;
379 EXPORT_SYMBOL(filemap_write_and_wait_range);
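/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * typical data-integrity caller (an fsync-style path) syncs a byte range
 * like this; note that the end offset is inclusive.  sync_range_sketch()
 * is a hypothetical name.
 */
static int sync_range_sketch(struct file *file, loff_t pos, size_t len)
{
	if (len == 0)
		return 0;
	return filemap_write_and_wait_range(file->f_mapping,
					    pos, pos + len - 1);
}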
382 * replace_page_cache_page - replace a pagecache page with a new one
383 * @old: page to be replaced
384 * @new: page to replace with
385 * @gfp_mask: allocation mode
387 * This function replaces a page in the pagecache with a new one. On
388 * success it acquires the pagecache reference for the new page and
389 * drops it for the old page. Both the old and new pages must be
390 * locked. This function does not add the new page to the LRU, the
391 * caller must do that.
393 * The remove + add is atomic. The only way this function can fail is
394 * memory allocation failure.
396 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
398 int error;
399 struct mem_cgroup *memcg = NULL;
401 VM_BUG_ON(!PageLocked(old));
402 VM_BUG_ON(!PageLocked(new));
403 VM_BUG_ON(new->mapping);
406 * This is not page migration, but prepare_migration and
407 * end_migration does enough work for charge replacement.
409 * In the longer term we probably want a specialized function
410 * for moving the charge from old to new in a more efficient
411 * manner.
413 error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
414 if (error)
415 return error;
417 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
418 if (!error) {
419 struct address_space *mapping = old->mapping;
420 void (*freepage)(struct page *);
422 pgoff_t offset = old->index;
423 freepage = mapping->a_ops->freepage;
425 page_cache_get(new);
426 new->mapping = mapping;
427 new->index = offset;
429 spin_lock_irq(&mapping->tree_lock);
430 __delete_from_page_cache(old);
431 error = radix_tree_insert(&mapping->page_tree, offset, new);
432 BUG_ON(error);
433 mapping->nrpages++;
434 __inc_zone_page_state(new, NR_FILE_PAGES);
435 if (PageSwapBacked(new))
436 __inc_zone_page_state(new, NR_SHMEM);
437 spin_unlock_irq(&mapping->tree_lock);
438 radix_tree_preload_end();
439 if (freepage)
440 freepage(old);
441 page_cache_release(old);
442 mem_cgroup_end_migration(memcg, old, new, true);
443 } else {
444 mem_cgroup_end_migration(memcg, old, new, false);
447 return error;
449 EXPORT_SYMBOL_GPL(replace_page_cache_page);
452 * add_to_page_cache_locked - add a locked page to the pagecache
453 * @page: page to add
454 * @mapping: the page's address_space
455 * @offset: page index
456 * @gfp_mask: page allocation mode
458 * This function is used to add a page to the pagecache. It must be locked.
459 * This function does not add the page to the LRU. The caller must do that.
461 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
462 pgoff_t offset, gfp_t gfp_mask)
464 int error;
466 VM_BUG_ON(!PageLocked(page));
468 error = mem_cgroup_cache_charge(page, current->mm,
469 gfp_mask & GFP_RECLAIM_MASK);
470 if (error)
471 goto out;
473 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
474 if (error == 0) {
475 page_cache_get(page);
476 page->mapping = mapping;
477 page->index = offset;
479 spin_lock_irq(&mapping->tree_lock);
480 error = radix_tree_insert(&mapping->page_tree, offset, page);
481 if (likely(!error)) {
482 mapping->nrpages++;
483 __inc_zone_page_state(page, NR_FILE_PAGES);
484 if (PageSwapBacked(page))
485 __inc_zone_page_state(page, NR_SHMEM);
486 spin_unlock_irq(&mapping->tree_lock);
487 } else {
488 page->mapping = NULL;
489 spin_unlock_irq(&mapping->tree_lock);
490 mem_cgroup_uncharge_cache_page(page);
491 page_cache_release(page);
493 radix_tree_preload_end();
494 } else
495 mem_cgroup_uncharge_cache_page(page);
496 out:
497 return error;
499 EXPORT_SYMBOL(add_to_page_cache_locked);
501 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
502 pgoff_t offset, gfp_t gfp_mask)
504 int ret;
507 * Splice_read and readahead add shmem/tmpfs pages into the page cache
508 * before shmem_readpage has a chance to mark them as SwapBacked: they
509 * need to go on the anon lru below, and mem_cgroup_cache_charge
510 * (called in add_to_page_cache) needs to know where they're going too.
512 if (mapping_cap_swap_backed(mapping))
513 SetPageSwapBacked(page);
515 ret = add_to_page_cache(page, mapping, offset, gfp_mask);
516 if (ret == 0) {
517 if (page_is_file_cache(page))
518 lru_cache_add_file(page);
519 else
520 lru_cache_add_anon(page);
522 return ret;
524 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
526 #ifdef CONFIG_NUMA
527 struct page *__page_cache_alloc(gfp_t gfp)
529 int n;
530 struct page *page;
532 if (cpuset_do_page_mem_spread()) {
533 get_mems_allowed();
534 n = cpuset_mem_spread_node();
535 page = alloc_pages_exact_node(n, gfp, 0);
536 put_mems_allowed();
537 return page;
539 return alloc_pages(gfp, 0);
541 EXPORT_SYMBOL(__page_cache_alloc);
542 #endif
545 * In order to wait for pages to become available there must be
546 * waitqueues associated with pages. By using a hash table of
547 * waitqueues where the bucket discipline is to maintain all
548 * waiters on the same queue and wake all when any of the pages
549 * become available, and for the woken contexts to check to be
550 * sure the appropriate page became available, this saves space
551 * at a cost of "thundering herd" phenomena during rare hash
552 * collisions.
554 static wait_queue_head_t *page_waitqueue(struct page *page)
556 const struct zone *zone = page_zone(page);
558 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
561 static inline void wake_up_page(struct page *page, int bit)
563 __wake_up_bit(page_waitqueue(page), &page->flags, bit);
566 void wait_on_page_bit(struct page *page, int bit_nr)
568 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
570 if (test_bit(bit_nr, &page->flags))
571 __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
572 TASK_UNINTERRUPTIBLE);
574 EXPORT_SYMBOL(wait_on_page_bit);
577 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
578 * @page: Page defining the wait queue of interest
579 * @waiter: Waiter to add to the queue
581 * Add an arbitrary @waiter to the wait queue for the nominated @page.
583 void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
585 wait_queue_head_t *q = page_waitqueue(page);
586 unsigned long flags;
588 spin_lock_irqsave(&q->lock, flags);
589 __add_wait_queue(q, waiter);
590 spin_unlock_irqrestore(&q->lock, flags);
592 EXPORT_SYMBOL_GPL(add_page_wait_queue);
595 * unlock_page - unlock a locked page
596 * @page: the page
598 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
599 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
600 * mechanism between PageLocked pages and PageWriteback pages is shared.
601 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
603 * The mb is necessary to enforce ordering between the clear_bit and the read
604 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
606 void unlock_page(struct page *page)
608 VM_BUG_ON(!PageLocked(page));
609 clear_bit_unlock(PG_locked, &page->flags);
610 smp_mb__after_clear_bit();
611 wake_up_page(page, PG_locked);
613 EXPORT_SYMBOL(unlock_page);
616 * end_page_writeback - end writeback against a page
617 * @page: the page
619 void end_page_writeback(struct page *page)
621 if (TestClearPageReclaim(page))
622 rotate_reclaimable_page(page);
624 if (!test_clear_page_writeback(page))
625 BUG();
627 smp_mb__after_clear_bit();
628 wake_up_page(page, PG_writeback);
630 EXPORT_SYMBOL(end_page_writeback);
633 * __lock_page - get a lock on the page, assuming we need to sleep to get it
634 * @page: the page to lock
636 void __lock_page(struct page *page)
638 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
640 __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
641 TASK_UNINTERRUPTIBLE);
643 EXPORT_SYMBOL(__lock_page);
645 int __lock_page_killable(struct page *page)
647 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
649 return __wait_on_bit_lock(page_waitqueue(page), &wait,
650 sleep_on_page_killable, TASK_KILLABLE);
652 EXPORT_SYMBOL_GPL(__lock_page_killable);
654 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
655 unsigned int flags)
657 if (!(flags & FAULT_FLAG_ALLOW_RETRY)) {
658 __lock_page(page);
659 return 1;
660 } else {
661 if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
662 up_read(&mm->mmap_sem);
663 wait_on_page_locked(page);
665 return 0;
670 * find_get_page - find and get a page reference
671 * @mapping: the address_space to search
672 * @offset: the page index
674 * Is there a pagecache struct page at the given (mapping, offset) tuple?
675 * If yes, increment its refcount and return it; if no, return NULL.
677 struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
679 void **pagep;
680 struct page *page;
682 rcu_read_lock();
683 repeat:
684 page = NULL;
685 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
686 if (pagep) {
687 page = radix_tree_deref_slot(pagep);
688 if (unlikely(!page))
689 goto out;
690 if (radix_tree_deref_retry(page))
691 goto repeat;
693 if (!page_cache_get_speculative(page))
694 goto repeat;
697 * Has the page moved?
698 * This is part of the lockless pagecache protocol. See
699 * include/linux/pagemap.h for details.
701 if (unlikely(page != *pagep)) {
702 page_cache_release(page);
703 goto repeat;
706 out:
707 rcu_read_unlock();
709 return page;
711 EXPORT_SYMBOL(find_get_page);
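/*
 * Editor's note: illustrative sketch, not part of the original file.
 * find_get_page() returns a referenced but unlocked page, so the caller
 * must drop the reference when done.  page_is_cached() is a hypothetical
 * helper name.
 */
static int page_is_cached(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return 0;
	page_cache_release(page);
	return 1;
}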
714 * find_lock_page - locate, pin and lock a pagecache page
715 * @mapping: the address_space to search
716 * @offset: the page index
718 * Locates the desired pagecache page, locks it, increments its reference
719 * count and returns its address.
721 * Returns NULL if the page was not present. find_lock_page() may sleep.
723 struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
725 struct page *page;
727 repeat:
728 page = find_get_page(mapping, offset);
729 if (page) {
730 lock_page(page);
731 /* Has the page been truncated? */
732 if (unlikely(page->mapping != mapping)) {
733 unlock_page(page);
734 page_cache_release(page);
735 goto repeat;
737 VM_BUG_ON(page->index != offset);
739 return page;
741 EXPORT_SYMBOL(find_lock_page);
744 * find_or_create_page - locate or add a pagecache page
745 * @mapping: the page's address_space
746 * @index: the page's index into the mapping
747 * @gfp_mask: page allocation mode
749 * Locates a page in the pagecache. If the page is not present, a new page
750 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
751 * LRU list. The returned page is locked and has its reference count
752 * incremented.
754 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
755 * allocation!
757 * find_or_create_page() returns the desired page's address, or NULL on
758 * memory exhaustion.
760 struct page *find_or_create_page(struct address_space *mapping,
761 pgoff_t index, gfp_t gfp_mask)
763 struct page *page;
764 int err;
765 repeat:
766 page = find_lock_page(mapping, index);
767 if (!page) {
768 page = __page_cache_alloc(gfp_mask);
769 if (!page)
770 return NULL;
772 * We want a regular kernel memory (not highmem or DMA etc)
773 * allocation for the radix tree nodes, but we need to honour
774 * the context-specific requirements the caller has asked for.
775 * GFP_RECLAIM_MASK collects those requirements.
777 err = add_to_page_cache_lru(page, mapping, index,
778 (gfp_mask & GFP_RECLAIM_MASK));
779 if (unlikely(err)) {
780 page_cache_release(page);
781 page = NULL;
782 if (err == -EEXIST)
783 goto repeat;
786 return page;
788 EXPORT_SYMBOL(find_or_create_page);
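/*
 * Editor's note: illustrative sketch, not part of the original file.
 * find_or_create_page() hands back a locked, referenced page (or NULL), so
 * a caller typically fills it, marks it uptodate and unlocks it.
 * zero_fill_page() is a hypothetical helper name; zero-filling stands in
 * for whatever content the filesystem would generate.
 */
static struct page *zero_fill_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	void *kaddr;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return NULL;
	if (!PageUptodate(page)) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	unlock_page(page);
	return page;		/* still referenced; caller releases it */
}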
791 * find_get_pages - gang pagecache lookup
792 * @mapping: The address_space to search
793 * @start: The starting page index
794 * @nr_pages: The maximum number of pages
795 * @pages: Where the resulting pages are placed
797 * find_get_pages() will search for and return a group of up to
798 * @nr_pages pages in the mapping. The pages are placed at @pages.
799 * find_get_pages() takes a reference against the returned pages.
801 * The search returns a group of mapping-contiguous pages with ascending
802 * indexes. There may be holes in the indices due to not-present pages.
804 * find_get_pages() returns the number of pages which were found.
806 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
807 unsigned int nr_pages, struct page **pages)
809 unsigned int i;
810 unsigned int ret;
811 unsigned int nr_found;
813 rcu_read_lock();
814 restart:
815 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
816 (void ***)pages, start, nr_pages);
817 ret = 0;
818 for (i = 0; i < nr_found; i++) {
819 struct page *page;
820 repeat:
821 page = radix_tree_deref_slot((void **)pages[i]);
822 if (unlikely(!page))
823 continue;
826 * This can only trigger when the entry at index 0 moves out
827 * of or back to the root: none yet gotten, safe to restart.
829 if (radix_tree_deref_retry(page)) {
830 WARN_ON(start | i);
831 goto restart;
834 if (!page_cache_get_speculative(page))
835 goto repeat;
837 /* Has the page moved? */
838 if (unlikely(page != *((void **)pages[i]))) {
839 page_cache_release(page);
840 goto repeat;
843 pages[ret] = page;
844 ret++;
848 * If all entries were removed before we could secure them,
849 * try again, because callers stop trying once 0 is returned.
851 if (unlikely(!ret && nr_found))
852 goto restart;
853 rcu_read_unlock();
854 return ret;
858 * find_get_pages_contig - gang contiguous pagecache lookup
859 * @mapping: The address_space to search
860 * @index: The starting page index
861 * @nr_pages: The maximum number of pages
862 * @pages: Where the resulting pages are placed
864 * find_get_pages_contig() works exactly like find_get_pages(), except
865 * that the returned number of pages are guaranteed to be contiguous.
867 * find_get_pages_contig() returns the number of pages which were found.
869 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
870 unsigned int nr_pages, struct page **pages)
872 unsigned int i;
873 unsigned int ret;
874 unsigned int nr_found;
876 rcu_read_lock();
877 restart:
878 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
879 (void ***)pages, index, nr_pages);
880 ret = 0;
881 for (i = 0; i < nr_found; i++) {
882 struct page *page;
883 repeat:
884 page = radix_tree_deref_slot((void **)pages[i]);
885 if (unlikely(!page))
886 continue;
889 * This can only trigger when the entry at index 0 moves out
890 * of or back to the root: none yet gotten, safe to restart.
892 if (radix_tree_deref_retry(page))
893 goto restart;
895 if (!page_cache_get_speculative(page))
896 goto repeat;
898 /* Has the page moved? */
899 if (unlikely(page != *((void **)pages[i]))) {
900 page_cache_release(page);
901 goto repeat;
905 * must check mapping and index after taking the ref.
906 * otherwise we can get both false positives and false
907 * negatives, which is just confusing to the caller.
909 if (page->mapping == NULL || page->index != index) {
910 page_cache_release(page);
911 break;
914 pages[ret] = page;
915 ret++;
916 index++;
918 rcu_read_unlock();
919 return ret;
921 EXPORT_SYMBOL(find_get_pages_contig);
924 * find_get_pages_tag - find and return pages that match @tag
925 * @mapping: the address_space to search
926 * @index: the starting page index
927 * @tag: the tag index
928 * @nr_pages: the maximum number of pages
929 * @pages: where the resulting pages are placed
931 * Like find_get_pages, except we only return pages which are tagged with
932 * @tag. We update @index to index the next page for the traversal.
934 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
935 int tag, unsigned int nr_pages, struct page **pages)
937 unsigned int i;
938 unsigned int ret;
939 unsigned int nr_found;
941 rcu_read_lock();
942 restart:
943 nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
944 (void ***)pages, *index, nr_pages, tag);
945 ret = 0;
946 for (i = 0; i < nr_found; i++) {
947 struct page *page;
948 repeat:
949 page = radix_tree_deref_slot((void **)pages[i]);
950 if (unlikely(!page))
951 continue;
954 * This can only trigger when the entry at index 0 moves out
955 * of or back to the root: none yet gotten, safe to restart.
957 if (radix_tree_deref_retry(page))
958 goto restart;
960 if (!page_cache_get_speculative(page))
961 goto repeat;
963 /* Has the page moved? */
964 if (unlikely(page != *((void **)pages[i]))) {
965 page_cache_release(page);
966 goto repeat;
969 pages[ret] = page;
970 ret++;
974 * If all entries were removed before we could secure them,
975 * try again, because callers stop trying once 0 is returned.
977 if (unlikely(!ret && nr_found))
978 goto restart;
979 rcu_read_unlock();
981 if (ret)
982 *index = pages[ret - 1]->index + 1;
984 return ret;
986 EXPORT_SYMBOL(find_get_pages_tag);
989 * grab_cache_page_nowait - returns locked page at given index in given cache
990 * @mapping: target address_space
991 * @index: the page index
993 * Same as grab_cache_page(), but do not wait if the page is unavailable.
994 * This is intended for speculative data generators, where the data can
995 * be regenerated if the page couldn't be grabbed. This routine should
996 * be safe to call while holding the lock for another page.
998 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
999 * and deadlock against the caller's locked page.
1001 struct page *
1002 grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
1004 struct page *page = find_get_page(mapping, index);
1006 if (page) {
1007 if (trylock_page(page))
1008 return page;
1009 page_cache_release(page);
1010 return NULL;
1012 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
1013 if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
1014 page_cache_release(page);
1015 page = NULL;
1017 return page;
1019 EXPORT_SYMBOL(grab_cache_page_nowait);
1022 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1023 * a _large_ part of the i/o request. Imagine the worst scenario:
1025 * ---R__________________________________________B__________
1026 * ^ reading here ^ bad block(assume 4k)
1028 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1029 * => failing the whole request => read(R) => read(R+1) =>
1030 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1031 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1032 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1034 * It is going insane. Fix it by quickly scaling down the readahead size.
1036 static void shrink_readahead_size_eio(struct file *filp,
1037 struct file_ra_state *ra)
1039 ra->ra_pages /= 4;
1043 * do_generic_file_read - generic file read routine
1044 * @filp: the file to read
1045 * @ppos: current file position
1046 * @desc: read_descriptor
1047 * @actor: read method
1049 * This is a generic file read routine, and uses the
1050 * mapping->a_ops->readpage() function for the actual low-level stuff.
1052 * This is really ugly. But the goto's actually try to clarify some
1053 * of the logic when it comes to error handling etc.
1055 static void do_generic_file_read(struct file *filp, loff_t *ppos,
1056 read_descriptor_t *desc, read_actor_t actor)
1058 struct address_space *mapping = filp->f_mapping;
1059 struct inode *inode = mapping->host;
1060 struct file_ra_state *ra = &filp->f_ra;
1061 pgoff_t index;
1062 pgoff_t last_index;
1063 pgoff_t prev_index;
1064 unsigned long offset; /* offset into pagecache page */
1065 unsigned int prev_offset;
1066 int error;
1068 index = *ppos >> PAGE_CACHE_SHIFT;
1069 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
1070 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
1071 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
1072 offset = *ppos & ~PAGE_CACHE_MASK;
1074 for (;;) {
1075 struct page *page;
1076 pgoff_t end_index;
1077 loff_t isize;
1078 unsigned long nr, ret;
1080 cond_resched();
1081 find_page:
1082 page = find_get_page(mapping, index);
1083 if (!page) {
1084 page_cache_sync_readahead(mapping,
1085 ra, filp,
1086 index, last_index - index);
1087 page = find_get_page(mapping, index);
1088 if (unlikely(page == NULL))
1089 goto no_cached_page;
1091 if (PageReadahead(page)) {
1092 page_cache_async_readahead(mapping,
1093 ra, filp, page,
1094 index, last_index - index);
1096 if (!PageUptodate(page)) {
1097 if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
1098 !mapping->a_ops->is_partially_uptodate)
1099 goto page_not_up_to_date;
1100 if (!trylock_page(page))
1101 goto page_not_up_to_date;
1102 /* Did it get truncated before we got the lock? */
1103 if (!page->mapping)
1104 goto page_not_up_to_date_locked;
1105 if (!mapping->a_ops->is_partially_uptodate(page,
1106 desc, offset))
1107 goto page_not_up_to_date_locked;
1108 unlock_page(page);
1110 page_ok:
1112 * i_size must be checked after we know the page is Uptodate.
1114 * Checking i_size after the PageUptodate check allows us to calculate
1115 * the correct value for "nr", which means the zero-filled
1116 * part of the page is not copied back to userspace (unless
1117 * another truncate extends the file - this is desired though).
1120 isize = i_size_read(inode);
1121 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1122 if (unlikely(!isize || index > end_index)) {
1123 page_cache_release(page);
1124 goto out;
1127 /* nr is the maximum number of bytes to copy from this page */
1128 nr = PAGE_CACHE_SIZE;
1129 if (index == end_index) {
1130 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1131 if (nr <= offset) {
1132 page_cache_release(page);
1133 goto out;
1136 nr = nr - offset;
1138 /* If users can be writing to this page using arbitrary
1139 * virtual addresses, take care about potential aliasing
1140 * before reading the page on the kernel side.
1142 if (mapping_writably_mapped(mapping))
1143 flush_dcache_page(page);
1146 * When a sequential read accesses a page several times,
1147 * only mark it as accessed the first time.
1149 if (prev_index != index || offset != prev_offset)
1150 mark_page_accessed(page);
1151 prev_index = index;
1154 * Ok, we have the page, and it's up-to-date, so
1155 * now we can copy it to user space...
1157 * The actor routine returns how many bytes were actually used..
1158 * NOTE! This may not be the same as how much of a user buffer
1159 * we filled up (we may be padding etc), so we can only update
1160 * "pos" here (the actor routine has to update the user buffer
1161 * pointers and the remaining count).
1163 ret = actor(desc, page, offset, nr);
1164 offset += ret;
1165 index += offset >> PAGE_CACHE_SHIFT;
1166 offset &= ~PAGE_CACHE_MASK;
1167 prev_offset = offset;
1169 page_cache_release(page);
1170 if (ret == nr && desc->count)
1171 continue;
1172 goto out;
1174 page_not_up_to_date:
1175 /* Get exclusive access to the page ... */
1176 error = lock_page_killable(page);
1177 if (unlikely(error))
1178 goto readpage_error;
1180 page_not_up_to_date_locked:
1181 /* Did it get truncated before we got the lock? */
1182 if (!page->mapping) {
1183 unlock_page(page);
1184 page_cache_release(page);
1185 continue;
1188 /* Did somebody else fill it already? */
1189 if (PageUptodate(page)) {
1190 unlock_page(page);
1191 goto page_ok;
1194 readpage:
1196 * A previous I/O error may have been due to temporary
1197 * failures, eg. multipath errors.
1198 * PG_error will be set again if readpage fails.
1200 ClearPageError(page);
1201 /* Start the actual read. The read will unlock the page. */
1202 error = mapping->a_ops->readpage(filp, page);
1204 if (unlikely(error)) {
1205 if (error == AOP_TRUNCATED_PAGE) {
1206 page_cache_release(page);
1207 goto find_page;
1209 goto readpage_error;
1212 if (!PageUptodate(page)) {
1213 error = lock_page_killable(page);
1214 if (unlikely(error))
1215 goto readpage_error;
1216 if (!PageUptodate(page)) {
1217 if (page->mapping == NULL) {
1219 * invalidate_mapping_pages got it
1221 unlock_page(page);
1222 page_cache_release(page);
1223 goto find_page;
1225 unlock_page(page);
1226 shrink_readahead_size_eio(filp, ra);
1227 error = -EIO;
1228 goto readpage_error;
1230 unlock_page(page);
1233 goto page_ok;
1235 readpage_error:
1236 /* UHHUH! A synchronous read error occurred. Report it */
1237 desc->error = error;
1238 page_cache_release(page);
1239 goto out;
1241 no_cached_page:
1243 * Ok, it wasn't cached, so we need to create a new
1244 * page..
1246 page = page_cache_alloc_cold(mapping);
1247 if (!page) {
1248 desc->error = -ENOMEM;
1249 goto out;
1251 error = add_to_page_cache_lru(page, mapping,
1252 index, GFP_KERNEL);
1253 if (error) {
1254 page_cache_release(page);
1255 if (error == -EEXIST)
1256 goto find_page;
1257 desc->error = error;
1258 goto out;
1260 goto readpage;
1263 out:
1264 ra->prev_pos = prev_index;
1265 ra->prev_pos <<= PAGE_CACHE_SHIFT;
1266 ra->prev_pos |= prev_offset;
1268 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1269 file_accessed(filp);
1272 int file_read_actor(read_descriptor_t *desc, struct page *page,
1273 unsigned long offset, unsigned long size)
1275 char *kaddr;
1276 unsigned long left, count = desc->count;
1278 if (size > count)
1279 size = count;
1282 * Faults on the destination of a read are common, so do it before
1283 * taking the kmap.
1285 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1286 kaddr = kmap_atomic(page, KM_USER0);
1287 left = __copy_to_user_inatomic(desc->arg.buf,
1288 kaddr + offset, size);
1289 kunmap_atomic(kaddr, KM_USER0);
1290 if (left == 0)
1291 goto success;
1294 /* Do it the slow way */
1295 kaddr = kmap(page);
1296 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1297 kunmap(page);
1299 if (left) {
1300 size -= left;
1301 desc->error = -EFAULT;
1303 success:
1304 desc->count = count - size;
1305 desc->written += size;
1306 desc->arg.buf += size;
1307 return size;
1311 * Performs necessary checks before doing a write
1312 * @iov: io vector request
1313 * @nr_segs: number of segments in the iovec
1314 * @count: number of bytes to write
1315 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1317 * Adjust number of segments and amount of bytes to write (nr_segs should be
1318 * properly initialized first). Returns appropriate error code that caller
1319 * should return or zero in case that write should be allowed.
1321 int generic_segment_checks(const struct iovec *iov,
1322 unsigned long *nr_segs, size_t *count, int access_flags)
1324 unsigned long seg;
1325 size_t cnt = 0;
1326 for (seg = 0; seg < *nr_segs; seg++) {
1327 const struct iovec *iv = &iov[seg];
1330 * If any segment has a negative length, or the cumulative
1331 * length ever wraps negative then return -EINVAL.
1333 cnt += iv->iov_len;
1334 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1335 return -EINVAL;
1336 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1337 continue;
1338 if (seg == 0)
1339 return -EFAULT;
1340 *nr_segs = seg;
1341 cnt -= iv->iov_len; /* This segment is no good */
1342 break;
1344 *count = cnt;
1345 return 0;
1347 EXPORT_SYMBOL(generic_segment_checks);
1350 * generic_file_aio_read - generic filesystem read routine
1351 * @iocb: kernel I/O control block
1352 * @iov: io vector request
1353 * @nr_segs: number of segments in the iovec
1354 * @pos: current file position
1356 * This is the "read()" routine for all filesystems
1357 * that can use the page cache directly.
1359 ssize_t
1360 generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1361 unsigned long nr_segs, loff_t pos)
1363 struct file *filp = iocb->ki_filp;
1364 ssize_t retval;
1365 unsigned long seg = 0;
1366 size_t count;
1367 loff_t *ppos = &iocb->ki_pos;
1368 struct blk_plug plug;
1370 count = 0;
1371 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1372 if (retval)
1373 return retval;
1375 blk_start_plug(&plug);
1377 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1378 if (filp->f_flags & O_DIRECT) {
1379 loff_t size;
1380 struct address_space *mapping;
1381 struct inode *inode;
1383 mapping = filp->f_mapping;
1384 inode = mapping->host;
1385 if (!count)
1386 goto out; /* skip atime */
1387 size = i_size_read(inode);
1388 if (pos < size) {
1389 retval = filemap_write_and_wait_range(mapping, pos,
1390 pos + iov_length(iov, nr_segs) - 1);
1391 if (!retval) {
1392 retval = mapping->a_ops->direct_IO(READ, iocb,
1393 iov, pos, nr_segs);
1395 if (retval > 0) {
1396 *ppos = pos + retval;
1397 count -= retval;
1401 * Btrfs can have a short DIO read if we encounter
1402 * compressed extents, so if there was an error, or if
1403 * we've already read everything we wanted to, or if
1404 * there was a short read because we hit EOF, go ahead
1405 * and return. Otherwise fallthrough to buffered io for
1406 * the rest of the read.
1408 if (retval < 0 || !count || *ppos >= size) {
1409 file_accessed(filp);
1410 goto out;
1415 count = retval;
1416 for (seg = 0; seg < nr_segs; seg++) {
1417 read_descriptor_t desc;
1418 loff_t offset = 0;
1421 * If we did a short DIO read we need to skip the section of the
1422 * iov that we've already read data into.
1424 if (count) {
1425 if (count > iov[seg].iov_len) {
1426 count -= iov[seg].iov_len;
1427 continue;
1429 offset = count;
1430 count = 0;
1433 desc.written = 0;
1434 desc.arg.buf = iov[seg].iov_base + offset;
1435 desc.count = iov[seg].iov_len - offset;
1436 if (desc.count == 0)
1437 continue;
1438 desc.error = 0;
1439 do_generic_file_read(filp, ppos, &desc, file_read_actor);
1440 retval += desc.written;
1441 if (desc.error) {
1442 retval = retval ?: desc.error;
1443 break;
1445 if (desc.count > 0)
1446 break;
1448 out:
1449 blk_finish_plug(&plug);
1450 return retval;
1452 EXPORT_SYMBOL(generic_file_aio_read);
1454 static ssize_t
1455 do_readahead(struct address_space *mapping, struct file *filp,
1456 pgoff_t index, unsigned long nr)
1458 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1459 return -EINVAL;
1461 force_page_cache_readahead(mapping, filp, index, nr);
1462 return 0;
1465 SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
1467 ssize_t ret;
1468 struct file *file;
1470 ret = -EBADF;
1471 file = fget(fd);
1472 if (file) {
1473 if (file->f_mode & FMODE_READ) {
1474 struct address_space *mapping = file->f_mapping;
1475 pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1476 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1477 unsigned long len = end - start + 1;
1478 ret = do_readahead(mapping, file, start, len);
1480 fput(file);
1482 return ret;
1484 #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1485 asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
1487 return SYSC_readahead((int) fd, offset, (size_t) count);
1489 SYSCALL_ALIAS(sys_readahead, SyS_readahead);
1490 #endif
1492 #ifdef CONFIG_MMU
1494 * page_cache_read - adds requested page to the page cache if not already there
1495 * @file: file to read
1496 * @offset: page index
1498 * This adds the requested page to the page cache if it isn't already there,
1499 * and schedules an I/O to read in its contents from disk.
1501 static int page_cache_read(struct file *file, pgoff_t offset)
1503 struct address_space *mapping = file->f_mapping;
1504 struct page *page;
1505 int ret;
1507 do {
1508 page = page_cache_alloc_cold(mapping);
1509 if (!page)
1510 return -ENOMEM;
1512 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1513 if (ret == 0)
1514 ret = mapping->a_ops->readpage(file, page);
1515 else if (ret == -EEXIST)
1516 ret = 0; /* losing race to add is OK */
1518 page_cache_release(page);
1520 } while (ret == AOP_TRUNCATED_PAGE);
1522 return ret;
1525 #define MMAP_LOTSAMISS (100)
1528 * Synchronous readahead happens when we don't even find
1529 * a page in the page cache at all.
1531 static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1532 struct file_ra_state *ra,
1533 struct file *file,
1534 pgoff_t offset)
1536 unsigned long ra_pages;
1537 struct address_space *mapping = file->f_mapping;
1539 /* If we don't want any read-ahead, don't bother */
1540 if (VM_RandomReadHint(vma))
1541 return;
1543 if (VM_SequentialReadHint(vma) ||
1544 offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
1545 page_cache_sync_readahead(mapping, ra, file, offset,
1546 ra->ra_pages);
1547 return;
1550 if (ra->mmap_miss < INT_MAX)
1551 ra->mmap_miss++;
1554 * Do we miss much more than hit in this file? If so,
1555 * stop bothering with read-ahead. It will only hurt.
1557 if (ra->mmap_miss > MMAP_LOTSAMISS)
1558 return;
1561 * mmap read-around
1563 ra_pages = max_sane_readahead(ra->ra_pages);
1564 if (ra_pages) {
1565 ra->start = max_t(long, 0, offset - ra_pages/2);
1566 ra->size = ra_pages;
1567 ra->async_size = 0;
1568 ra_submit(ra, mapping, file);
1573 * Asynchronous readahead happens when we find the page and PG_readahead,
1574 * so we want to possibly extend the readahead further..
1576 static void do_async_mmap_readahead(struct vm_area_struct *vma,
1577 struct file_ra_state *ra,
1578 struct file *file,
1579 struct page *page,
1580 pgoff_t offset)
1582 struct address_space *mapping = file->f_mapping;
1584 /* If we don't want any read-ahead, don't bother */
1585 if (VM_RandomReadHint(vma))
1586 return;
1587 if (ra->mmap_miss > 0)
1588 ra->mmap_miss--;
1589 if (PageReadahead(page))
1590 page_cache_async_readahead(mapping, ra, file,
1591 page, offset, ra->ra_pages);
1595 * filemap_fault - read in file data for page fault handling
1596 * @vma: vma in which the fault was taken
1597 * @vmf: struct vm_fault containing details of the fault
1599 * filemap_fault() is invoked via the vma operations vector for a
1600 * mapped memory region to read in file data during a page fault.
1602 * The goto's are kind of ugly, but this streamlines the normal case of having
1603 * it in the page cache, and handles the special cases reasonably without
1604 * having a lot of duplicated code.
1606 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1608 int error;
1609 struct file *file = vma->vm_file;
1610 struct address_space *mapping = file->f_mapping;
1611 struct file_ra_state *ra = &file->f_ra;
1612 struct inode *inode = mapping->host;
1613 pgoff_t offset = vmf->pgoff;
1614 struct page *page;
1615 pgoff_t size;
1616 int ret = 0;
1618 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1619 if (offset >= size)
1620 return VM_FAULT_SIGBUS;
1623 * Do we have something in the page cache already?
1625 page = find_get_page(mapping, offset);
1626 if (likely(page)) {
1628 * We found the page, so try async readahead before
1629 * waiting for the lock.
1631 do_async_mmap_readahead(vma, ra, file, page, offset);
1632 } else {
1633 /* No page in the page cache at all */
1634 do_sync_mmap_readahead(vma, ra, file, offset);
1635 count_vm_event(PGMAJFAULT);
1636 ret = VM_FAULT_MAJOR;
1637 retry_find:
1638 page = find_get_page(mapping, offset);
1639 if (!page)
1640 goto no_cached_page;
1643 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1644 page_cache_release(page);
1645 return ret | VM_FAULT_RETRY;
1648 /* Did it get truncated? */
1649 if (unlikely(page->mapping != mapping)) {
1650 unlock_page(page);
1651 put_page(page);
1652 goto retry_find;
1654 VM_BUG_ON(page->index != offset);
1657 * We have a locked page in the page cache, now we need to check
1658 * that it's up-to-date. If not, it is going to be due to an error.
1660 if (unlikely(!PageUptodate(page)))
1661 goto page_not_uptodate;
1664 * Found the page and have a reference on it.
1665 * We must recheck i_size under page lock.
1667 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1668 if (unlikely(offset >= size)) {
1669 unlock_page(page);
1670 page_cache_release(page);
1671 return VM_FAULT_SIGBUS;
1674 ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
1675 vmf->page = page;
1676 return ret | VM_FAULT_LOCKED;
1678 no_cached_page:
1680 * We're only likely to ever get here if MADV_RANDOM is in
1681 * effect.
1683 error = page_cache_read(file, offset);
1686 * The page we want has now been added to the page cache.
1687 * In the unlikely event that someone removed it in the
1688 * meantime, we'll just come back here and read it again.
1690 if (error >= 0)
1691 goto retry_find;
1694 * An error return from page_cache_read can result if the
1695 * system is low on memory, or if a problem occurs while trying
1696 * to schedule I/O.
1698 if (error == -ENOMEM)
1699 return VM_FAULT_OOM;
1700 return VM_FAULT_SIGBUS;
1702 page_not_uptodate:
1704 * Umm, take care of errors if the page isn't up-to-date.
1705 * Try to re-read it _once_. We do this synchronously,
1706 * because there really aren't any performance issues here
1707 * and we need to check for errors.
1709 ClearPageError(page);
1710 error = mapping->a_ops->readpage(file, page);
1711 if (!error) {
1712 wait_on_page_locked(page);
1713 if (!PageUptodate(page))
1714 error = -EIO;
1716 page_cache_release(page);
1718 if (!error || error == AOP_TRUNCATED_PAGE)
1719 goto retry_find;
1721 /* Things didn't work out. Return VM_FAULT_SIGBUS to tell the mm layer so. */
1722 shrink_readahead_size_eio(file, ra);
1723 return VM_FAULT_SIGBUS;
1725 EXPORT_SYMBOL(filemap_fault);
1727 const struct vm_operations_struct generic_file_vm_ops = {
1728 .fault = filemap_fault,
1731 /* This is used for a general mmap of a disk file */
1733 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1735 struct address_space *mapping = file->f_mapping;
1737 if (!mapping->a_ops->readpage)
1738 return -ENOEXEC;
1739 file_accessed(file);
1740 vma->vm_ops = &generic_file_vm_ops;
1741 vma->vm_flags |= VM_CAN_NONLINEAR;
1742 return 0;
1746 * This is for filesystems which do not implement ->writepage.
1748 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1750 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1751 return -EINVAL;
1752 return generic_file_mmap(file, vma);
1754 #else
1755 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1757 return -ENOSYS;
1759 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1761 return -ENOSYS;
1763 #endif /* CONFIG_MMU */
1765 EXPORT_SYMBOL(generic_file_mmap);
1766 EXPORT_SYMBOL(generic_file_readonly_mmap);
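/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * pagecache-backed filesystem usually plugs these generic helpers straight
 * into its file_operations; "myfs" is a hypothetical filesystem name.
 */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_mmap,	/* faults via filemap_fault() */
};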
1768 static struct page *__read_cache_page(struct address_space *mapping,
1769 pgoff_t index,
1770 int (*filler)(void *,struct page*),
1771 void *data,
1772 gfp_t gfp)
1774 struct page *page;
1775 int err;
1776 repeat:
1777 page = find_get_page(mapping, index);
1778 if (!page) {
1779 page = __page_cache_alloc(gfp | __GFP_COLD);
1780 if (!page)
1781 return ERR_PTR(-ENOMEM);
1782 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1783 if (unlikely(err)) {
1784 page_cache_release(page);
1785 if (err == -EEXIST)
1786 goto repeat;
1787 /* Presumably ENOMEM for radix tree node */
1788 return ERR_PTR(err);
1790 err = filler(data, page);
1791 if (err < 0) {
1792 page_cache_release(page);
1793 page = ERR_PTR(err);
1796 return page;
1799 static struct page *do_read_cache_page(struct address_space *mapping,
1800 pgoff_t index,
1801 int (*filler)(void *,struct page*),
1802 void *data,
1803 gfp_t gfp)
1806 struct page *page;
1807 int err;
1809 retry:
1810 page = __read_cache_page(mapping, index, filler, data, gfp);
1811 if (IS_ERR(page))
1812 return page;
1813 if (PageUptodate(page))
1814 goto out;
1816 lock_page(page);
1817 if (!page->mapping) {
1818 unlock_page(page);
1819 page_cache_release(page);
1820 goto retry;
1822 if (PageUptodate(page)) {
1823 unlock_page(page);
1824 goto out;
1826 err = filler(data, page);
1827 if (err < 0) {
1828 page_cache_release(page);
1829 return ERR_PTR(err);
1831 out:
1832 mark_page_accessed(page);
1833 return page;
1837 * read_cache_page_async - read into page cache, fill it if needed
1838 * @mapping: the page's address_space
1839 * @index: the page index
1840 * @filler: function to perform the read
1841 * @data: destination for read data
1843 * Same as read_cache_page, but don't wait for page to become unlocked
1844 * after submitting it to the filler.
1846 * Read into the page cache. If a page already exists, and PageUptodate() is
1847 * not set, try to fill the page but don't wait for it to become unlocked.
1849 * If the page does not get brought uptodate, return -EIO.
1851 struct page *read_cache_page_async(struct address_space *mapping,
1852 pgoff_t index,
1853 int (*filler)(void *,struct page*),
1854 void *data)
1856 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1858 EXPORT_SYMBOL(read_cache_page_async);
1860 static struct page *wait_on_page_read(struct page *page)
1862 if (!IS_ERR(page)) {
1863 wait_on_page_locked(page);
1864 if (!PageUptodate(page)) {
1865 page_cache_release(page);
1866 page = ERR_PTR(-EIO);
1869 return page;
1873 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
1874 * @mapping: the page's address_space
1875 * @index: the page index
1876 * @gfp: the page allocator flags to use if allocating
1878 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1879 * any new page allocations done using the specified allocation flags. Note
1880 * that the Radix tree operations will still use GFP_KERNEL, so you can't
1881 * expect to do this atomically or anything like that - but you can pass in
1882 * other page requirements.
1884 * If the page does not get brought uptodate, return -EIO.
1886 struct page *read_cache_page_gfp(struct address_space *mapping,
1887 pgoff_t index,
1888 gfp_t gfp)
1890 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1892 return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1894 EXPORT_SYMBOL(read_cache_page_gfp);
1897 * read_cache_page - read into page cache, fill it if needed
1898 * @mapping: the page's address_space
1899 * @index: the page index
1900 * @filler: function to perform the read
1901 * @data: destination for read data
1903 * Read into the page cache. If a page already exists, and PageUptodate() is
1904 * not set, try to fill the page then wait for it to become unlocked.
1906 * If the page does not get brought uptodate, return -EIO.
1908 struct page *read_cache_page(struct address_space *mapping,
1909 pgoff_t index,
1910 int (*filler)(void *,struct page*),
1911 void *data)
1913 return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
1915 EXPORT_SYMBOL(read_cache_page);
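/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * metadata reader commonly pulls a whole page through the pagecache with
 * read_cache_page(), passing ->readpage as the filler (read_mapping_page()
 * in pagemap.h is the usual wrapper).  read_one_page_sketch() is a
 * hypothetical name.
 */
static struct page *read_one_page_sketch(struct address_space *mapping,
					 struct file *file, pgoff_t index)
{
	struct page *page = read_cache_page(mapping, index,
				(filler_t *)mapping->a_ops->readpage, file);

	/* on success the page is uptodate and referenced, but not locked */
	return page;	/* may be ERR_PTR(-EIO) etc.; caller uses IS_ERR() */
}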
1918 * The logic we want is
1920 * if suid or (sgid and xgrp)
1921 * remove privs
1923 int should_remove_suid(struct dentry *dentry)
1925 mode_t mode = dentry->d_inode->i_mode;
1926 int kill = 0;
1928 /* suid always must be killed */
1929 if (unlikely(mode & S_ISUID))
1930 kill = ATTR_KILL_SUID;
1933 * sgid without any exec bits is just a mandatory locking mark; leave
1934 * it alone. If some exec bits are set, it's a real sgid; kill it.
1936 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1937 kill |= ATTR_KILL_SGID;
1939 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1940 return kill;
1942 return 0;
1944 EXPORT_SYMBOL(should_remove_suid);
1946 static int __remove_suid(struct dentry *dentry, int kill)
1948 struct iattr newattrs;
1950 newattrs.ia_valid = ATTR_FORCE | kill;
1951 return notify_change(dentry, &newattrs);
1954 int file_remove_suid(struct file *file)
1956 struct dentry *dentry = file->f_path.dentry;
1957 int killsuid = should_remove_suid(dentry);
1958 int killpriv = security_inode_need_killpriv(dentry);
1959 int error = 0;
1961 if (killpriv < 0)
1962 return killpriv;
1963 if (killpriv)
1964 error = security_inode_killpriv(dentry);
1965 if (!error && killsuid)
1966 error = __remove_suid(dentry, killsuid);
1968 return error;
1970 EXPORT_SYMBOL(file_remove_suid);
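/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the usual write-path ordering (drop suid/sgid, then update the
 * timestamps, then write); the caller is assumed to hold i_mutex, as
 * __generic_file_aio_write() does.  begin_write_sketch() is a hypothetical
 * name.
 */
static int begin_write_sketch(struct file *file)
{
	int err = file_remove_suid(file);

	if (err)
		return err;
	file_update_time(file);
	return 0;
}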
1972 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1973 const struct iovec *iov, size_t base, size_t bytes)
1975 size_t copied = 0, left = 0;
1977 while (bytes) {
1978 char __user *buf = iov->iov_base + base;
1979 int copy = min(bytes, iov->iov_len - base);
1981 base = 0;
1982 left = __copy_from_user_inatomic(vaddr, buf, copy);
1983 copied += copy;
1984 bytes -= copy;
1985 vaddr += copy;
1986 iov++;
1988 if (unlikely(left))
1989 break;
1991 return copied - left;
1995 * Copy as much as we can into the page and return the number of bytes which
1996 * were successfully copied. If a fault is encountered then return the number of
1997 * bytes which were copied.
1999 size_t iov_iter_copy_from_user_atomic(struct page *page,
2000 struct iov_iter *i, unsigned long offset, size_t bytes)
2002 char *kaddr;
2003 size_t copied;
2005 BUG_ON(!in_atomic());
2006 kaddr = kmap_atomic(page, KM_USER0);
2007 if (likely(i->nr_segs == 1)) {
2008 int left;
2009 char __user *buf = i->iov->iov_base + i->iov_offset;
2010 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
2011 copied = bytes - left;
2012 } else {
2013 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2014 i->iov, i->iov_offset, bytes);
2016 kunmap_atomic(kaddr, KM_USER0);
2018 return copied;
2020 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
2023 * This has the same sideeffects and return value as
2024 * iov_iter_copy_from_user_atomic().
2025 * The difference is that it attempts to resolve faults.
2026 * Page must not be locked.
2028 size_t iov_iter_copy_from_user(struct page *page,
2029 struct iov_iter *i, unsigned long offset, size_t bytes)
2031 char *kaddr;
2032 size_t copied;
2034 kaddr = kmap(page);
2035 if (likely(i->nr_segs == 1)) {
2036 int left;
2037 char __user *buf = i->iov->iov_base + i->iov_offset;
2038 left = __copy_from_user(kaddr + offset, buf, bytes);
2039 copied = bytes - left;
2040 } else {
2041 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2042 i->iov, i->iov_offset, bytes);
2044 kunmap(page);
2045 return copied;
2047 EXPORT_SYMBOL(iov_iter_copy_from_user);
2049 void iov_iter_advance(struct iov_iter *i, size_t bytes)
2051 BUG_ON(i->count < bytes);
2053 if (likely(i->nr_segs == 1)) {
2054 i->iov_offset += bytes;
2055 i->count -= bytes;
2056 } else {
2057 const struct iovec *iov = i->iov;
2058 size_t base = i->iov_offset;
2061 * The !iov->iov_len check ensures we skip over unlikely
2062 * zero-length segments (without overrunning the iovec).
2064 while (bytes || unlikely(i->count && !iov->iov_len)) {
2065 int copy;
2067 copy = min(bytes, iov->iov_len - base);
2068 BUG_ON(!i->count || i->count < copy);
2069 i->count -= copy;
2070 bytes -= copy;
2071 base += copy;
2072 if (iov->iov_len == base) {
2073 iov++;
2074 base = 0;
2077 i->iov = iov;
2078 i->iov_offset = base;
2081 EXPORT_SYMBOL(iov_iter_advance);
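/*
 * Illustrative sketch (hypothetical consumer, not from this file): an
 * iov_iter is normally drained by alternating a copy with an advance, where
 * copy_some_bytes() stands in for whatever copy primitive is actually used:
 *
 *	while (iov_iter_count(i)) {
 *		size_t copied = copy_some_bytes(i);
 *		iov_iter_advance(i, copied);
 *	}
 */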
2084 * Fault in the first iovec of the given iov_iter, to a maximum length
2085 * of bytes. Returns 0 on success, or non-zero if the memory could not be
2086 * accessed (i.e. because it is an invalid address).
2088 * writev-intensive code may want this to prefault several iovecs -- that
2089 * would be possible (callers must not rely on the fact that _only_ the
2090 * first iovec will be faulted with the current implementation).
2092 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2094 char __user *buf = i->iov->iov_base + i->iov_offset;
2095 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2096 return fault_in_pages_readable(buf, bytes);
2098 EXPORT_SYMBOL(iov_iter_fault_in_readable);
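/*
 * Illustrative sketch (not part of this file): callers prefault the source
 * *before* locking the destination pagecache page, so the later atomic copy
 * cannot deadlock on the very page being written to:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	... ->write_begin(), atomic copy, ->write_end() ...
 *
 * generic_perform_write() below is the canonical user.
 */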
2101 * Return the count of just the current iov_iter segment.
2103 size_t iov_iter_single_seg_count(struct iov_iter *i)
2105 const struct iovec *iov = i->iov;
2106 if (i->nr_segs == 1)
2107 return i->count;
2108 else
2109 return min(i->count, iov->iov_len - i->iov_offset);
2111 EXPORT_SYMBOL(iov_iter_single_seg_count);
2114 * Performs necessary checks before doing a write
2116 * Can adjust the writing position or the number of bytes to write.
2117 * Returns the appropriate error code that the caller should return, or
2118 * zero if the write should be allowed.
2120 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2122 struct inode *inode = file->f_mapping->host;
2123 unsigned long limit = rlimit(RLIMIT_FSIZE);
2125 if (unlikely(*pos < 0))
2126 return -EINVAL;
2128 if (!isblk) {
2129 /* FIXME: this is for backwards compatibility with 2.4 */
2130 if (file->f_flags & O_APPEND)
2131 *pos = i_size_read(inode);
2133 if (limit != RLIM_INFINITY) {
2134 if (*pos >= limit) {
2135 send_sig(SIGXFSZ, current, 0);
2136 return -EFBIG;
2138 if (*count > limit - (typeof(limit))*pos) {
2139 *count = limit - (typeof(limit))*pos;
2145 * LFS rule
2147 if (unlikely(*pos + *count > MAX_NON_LFS &&
2148 !(file->f_flags & O_LARGEFILE))) {
2149 if (*pos >= MAX_NON_LFS) {
2150 return -EFBIG;
2152 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2153 *count = MAX_NON_LFS - (unsigned long)*pos;
2158 * Are we about to exceed the fs block limit?
2160 * If we have written data it becomes a short write. If we have
2161 * exceeded without writing data we send a signal and return EFBIG.
2162 * Linus' frestrict idea will clean these up nicely..
2164 if (likely(!isblk)) {
2165 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2166 if (*count || *pos > inode->i_sb->s_maxbytes) {
2167 return -EFBIG;
2169 /* zero-length writes at ->s_maxbytes are OK */
2172 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2173 *count = inode->i_sb->s_maxbytes - *pos;
2174 } else {
2175 #ifdef CONFIG_BLOCK
2176 loff_t isize;
2177 if (bdev_read_only(I_BDEV(inode)))
2178 return -EPERM;
2179 isize = i_size_read(inode);
2180 if (*pos >= isize) {
2181 if (*count || *pos > isize)
2182 return -ENOSPC;
2185 if (*pos + *count > isize)
2186 *count = isize - *pos;
2187 #else
2188 return -EPERM;
2189 #endif
2191 return 0;
2193 EXPORT_SYMBOL(generic_write_checks);
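/*
 * Illustrative sketch (hypothetical caller, not from this file): a write path
 * applies the checks before doing any work and honours the possibly adjusted
 * position and count:
 *
 *	loff_t pos = *ppos;
 *	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 *	if (err)
 *		return err;
 *	if (count == 0)
 *		return 0;
 *	... write 'count' bytes starting at 'pos' ...
 *
 * __generic_file_aio_write() below does exactly this.
 */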
2195 int pagecache_write_begin(struct file *file, struct address_space *mapping,
2196 loff_t pos, unsigned len, unsigned flags,
2197 struct page **pagep, void **fsdata)
2199 const struct address_space_operations *aops = mapping->a_ops;
2201 return aops->write_begin(file, mapping, pos, len, flags,
2202 pagep, fsdata);
2204 EXPORT_SYMBOL(pagecache_write_begin);
2206 int pagecache_write_end(struct file *file, struct address_space *mapping,
2207 loff_t pos, unsigned len, unsigned copied,
2208 struct page *page, void *fsdata)
2210 const struct address_space_operations *aops = mapping->a_ops;
2212 mark_page_accessed(page);
2213 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2215 EXPORT_SYMBOL(pagecache_write_end);
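/*
 * Illustrative sketch (hypothetical caller, not from this file): code that
 * writes into another file's pagecache brackets each chunk with the two
 * wrappers above rather than calling the aops directly:
 *
 *	status = pagecache_write_begin(file, mapping, pos, len, 0,
 *					&page, &fsdata);
 *	if (status)
 *		return status;
 *	... copy 'len' bytes into 'page' at offset (pos & (PAGE_CACHE_SIZE-1)) ...
 *	status = pagecache_write_end(file, mapping, pos, len, len,
 *					page, fsdata);
 */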
2217 ssize_t
2218 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2219 unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2220 size_t count, size_t ocount)
2222 struct file *file = iocb->ki_filp;
2223 struct address_space *mapping = file->f_mapping;
2224 struct inode *inode = mapping->host;
2225 ssize_t written;
2226 size_t write_len;
2227 pgoff_t end;
2229 if (count != ocount)
2230 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2232 write_len = iov_length(iov, *nr_segs);
2233 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2235 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2236 if (written)
2237 goto out;
2240 * After a write we want buffered reads to be sure to go to disk to get
2241 * the new data. We invalidate clean cached page from the region we're
2242 * about to write. We do this *before* the write so that we can return
2243 * without clobbering -EIOCBQUEUED from ->direct_IO().
2245 if (mapping->nrpages) {
2246 written = invalidate_inode_pages2_range(mapping,
2247 pos >> PAGE_CACHE_SHIFT, end);
2249 * If a page cannot be invalidated, return 0 to fall back
2250 * to buffered write.
2252 if (written) {
2253 if (written == -EBUSY)
2254 return 0;
2255 goto out;
2259 written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2262 * Finally, try again to invalidate clean pages which might have been
2263 * cached by non-direct readahead, or faulted in by get_user_pages()
2264 * if the source of the write was an mmap'ed region of the file
2265 * we're writing. Either one is a pretty crazy thing to do,
2266 * so we don't support it 100%. If this invalidation
2267 * fails, tough, the write still worked...
2269 if (mapping->nrpages) {
2270 invalidate_inode_pages2_range(mapping,
2271 pos >> PAGE_CACHE_SHIFT, end);
2274 if (written > 0) {
2275 pos += written;
2276 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2277 i_size_write(inode, pos);
2278 mark_inode_dirty(inode);
2280 *ppos = pos;
2282 out:
2283 return written;
2285 EXPORT_SYMBOL(generic_file_direct_write);
2288 * Find or create a page at the given pagecache position. Return the locked
2289 * page. This function is specifically for buffered writes.
2291 struct page *grab_cache_page_write_begin(struct address_space *mapping,
2292 pgoff_t index, unsigned flags)
2294 int status;
2295 struct page *page;
2296 gfp_t gfp_notmask = 0;
2297 if (flags & AOP_FLAG_NOFS)
2298 gfp_notmask = __GFP_FS;
2299 repeat:
2300 page = find_lock_page(mapping, index);
2301 if (page)
2302 return page;
2304 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
2305 if (!page)
2306 return NULL;
2307 status = add_to_page_cache_lru(page, mapping, index,
2308 GFP_KERNEL & ~gfp_notmask);
2309 if (unlikely(status)) {
2310 page_cache_release(page);
2311 if (status == -EEXIST)
2312 goto repeat;
2313 return NULL;
2315 return page;
2317 EXPORT_SYMBOL(grab_cache_page_write_begin);
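/*
 * Illustrative sketch (hypothetical filesystem, not part of this file): a
 * minimal ->write_begin can be built directly on the helper above. Real
 * implementations additionally bring the page up to date for partial writes.
 */
#if 0	/* example only, never compiled */
static int foo_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *page;

	page = grab_cache_page_write_begin(mapping, pos >> PAGE_CACHE_SHIFT,
					   flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	return 0;
}
#endif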
2319 static ssize_t generic_perform_write(struct file *file,
2320 struct iov_iter *i, loff_t pos)
2322 struct address_space *mapping = file->f_mapping;
2323 const struct address_space_operations *a_ops = mapping->a_ops;
2324 long status = 0;
2325 ssize_t written = 0;
2326 unsigned int flags = 0;
2329 * Copies from kernel address space cannot fail (NFSD is a big user).
2331 if (segment_eq(get_fs(), KERNEL_DS))
2332 flags |= AOP_FLAG_UNINTERRUPTIBLE;
2334 do {
2335 struct page *page;
2336 unsigned long offset; /* Offset into pagecache page */
2337 unsigned long bytes; /* Bytes to write to page */
2338 size_t copied; /* Bytes copied from user */
2339 void *fsdata;
2341 offset = (pos & (PAGE_CACHE_SIZE - 1));
2342 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2343 iov_iter_count(i));
2345 again:
2348 * Bring in the user page that we will copy from _first_.
2349 * Otherwise there's a nasty deadlock on copying from the
2350 * same page as we're writing to, without it being marked
2351 * up-to-date.
2353 * Not only is this an optimisation, but it is also required
2354 * to check that the address is actually valid, when atomic
2355 * usercopies are used, below.
2357 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2358 status = -EFAULT;
2359 break;
2362 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2363 &page, &fsdata);
2364 if (unlikely(status))
2365 break;
2367 if (mapping_writably_mapped(mapping))
2368 flush_dcache_page(page);
2370 pagefault_disable();
2371 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2372 pagefault_enable();
2373 flush_dcache_page(page);
2375 mark_page_accessed(page);
2376 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2377 page, fsdata);
2378 if (unlikely(status < 0))
2379 break;
2380 copied = status;
2382 cond_resched();
2384 iov_iter_advance(i, copied);
2385 if (unlikely(copied == 0)) {
2387 * If we were unable to copy any data at all, we must
2388 * fall back to a single segment length write.
2390 * If we didn't fall back here, we could livelock
2391 * because not all segments in the iov can be copied at
2392 * once without a pagefault.
2394 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2395 iov_iter_single_seg_count(i));
2396 goto again;
2398 pos += copied;
2399 written += copied;
2401 balance_dirty_pages_ratelimited(mapping);
2403 } while (iov_iter_count(i));
2405 return written ? written : status;
2408 ssize_t
2409 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2410 unsigned long nr_segs, loff_t pos, loff_t *ppos,
2411 size_t count, ssize_t written)
2413 struct file *file = iocb->ki_filp;
2414 ssize_t status;
2415 struct iov_iter i;
2417 iov_iter_init(&i, iov, nr_segs, count, written);
2418 status = generic_perform_write(file, &i, pos);
2420 if (likely(status >= 0)) {
2421 written += status;
2422 *ppos = pos + status;
2425 return written ? written : status;
2427 EXPORT_SYMBOL(generic_file_buffered_write);
2430 * __generic_file_aio_write - write data to a file
2431 * @iocb: IO state structure (file, offset, etc.)
2432 * @iov: vector with data to write
2433 * @nr_segs: number of segments in the vector
2434 * @ppos: position where to write
2436 * This function does all the work needed for actually writing data to a
2437 * file. It does all basic checks, removes SUID from the file, updates
2438 * modification times and calls proper subroutines depending on whether we
2439 * do direct IO or a standard buffered write.
2441 * It expects i_mutex to be grabbed unless we work on a block device or similar
2442 * object which does not need locking at all.
2444 * This function does *not* take care of syncing data in case of O_SYNC write.
2445 * A caller has to handle it. This is mainly due to the fact that we want to
2446 * avoid syncing under i_mutex.
2448 ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2449 unsigned long nr_segs, loff_t *ppos)
2451 struct file *file = iocb->ki_filp;
2452 struct address_space * mapping = file->f_mapping;
2453 size_t ocount; /* original count */
2454 size_t count; /* after file limit checks */
2455 struct inode *inode = mapping->host;
2456 loff_t pos;
2457 ssize_t written;
2458 ssize_t err;
2460 ocount = 0;
2461 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2462 if (err)
2463 return err;
2465 count = ocount;
2466 pos = *ppos;
2468 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2470 /* We can write back this queue in page reclaim */
2471 current->backing_dev_info = mapping->backing_dev_info;
2472 written = 0;
2474 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2475 if (err)
2476 goto out;
2478 if (count == 0)
2479 goto out;
2481 err = file_remove_suid(file);
2482 if (err)
2483 goto out;
2485 file_update_time(file);
2487 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2488 if (unlikely(file->f_flags & O_DIRECT)) {
2489 loff_t endbyte;
2490 ssize_t written_buffered;
2492 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2493 ppos, count, ocount);
2494 if (written < 0 || written == count)
2495 goto out;
2497 * direct-io write to a hole: fall through to buffered I/O
2498 * for completing the rest of the request.
2500 pos += written;
2501 count -= written;
2502 written_buffered = generic_file_buffered_write(iocb, iov,
2503 nr_segs, pos, ppos, count,
2504 written);
2506 * If generic_file_buffered_write() returned a synchronous error
2507 * then we want to return the number of bytes which were
2508 * direct-written, or the error code if that was zero. Note
2509 * that this differs from normal direct-io semantics, which
2510 * will return -EFOO even if some bytes were written.
2512 if (written_buffered < 0) {
2513 err = written_buffered;
2514 goto out;
2518 * We need to ensure that the page cache pages are written to
2519 * disk and invalidated to preserve the expected O_DIRECT
2520 * semantics.
2522 endbyte = pos + written_buffered - written - 1;
2523 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2524 if (err == 0) {
2525 written = written_buffered;
2526 invalidate_mapping_pages(mapping,
2527 pos >> PAGE_CACHE_SHIFT,
2528 endbyte >> PAGE_CACHE_SHIFT);
2529 } else {
2531 * We don't know how much we wrote, so just return
2532 * the number of bytes which were direct-written
2535 } else {
2536 written = generic_file_buffered_write(iocb, iov, nr_segs,
2537 pos, ppos, count, written);
2539 out:
2540 current->backing_dev_info = NULL;
2541 return written ? written : err;
2543 EXPORT_SYMBOL(__generic_file_aio_write);
2546 * generic_file_aio_write - write data to a file
2547 * @iocb: IO state structure
2548 * @iov: vector with data to write
2549 * @nr_segs: number of segments in the vector
2550 * @pos: position in file where to write
2552 * This is a wrapper around __generic_file_aio_write() to be used by most
2553 * filesystems. It takes care of syncing the file when it is opened with O_SYNC
2554 * and acquires i_mutex as needed.
2556 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2557 unsigned long nr_segs, loff_t pos)
2559 struct file *file = iocb->ki_filp;
2560 struct inode *inode = file->f_mapping->host;
2561 struct blk_plug plug;
2562 ssize_t ret;
2564 BUG_ON(iocb->ki_pos != pos);
2566 mutex_lock(&inode->i_mutex);
2567 blk_start_plug(&plug);
2568 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2569 mutex_unlock(&inode->i_mutex);
2571 if (ret > 0 || ret == -EIOCBQUEUED) {
2572 ssize_t err;
2574 err = generic_write_sync(file, pos, ret);
2575 if (err < 0 && ret > 0)
2576 ret = err;
2578 blk_finish_plug(&plug);
2579 return ret;
2581 EXPORT_SYMBOL(generic_file_aio_write);
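/*
 * Illustrative sketch (hypothetical filesystem, not part of this file): most
 * filesystems simply wire the generic helpers into their file_operations;
 * "foo" is a made-up name, the helpers themselves are real:
 */
#if 0	/* example only, never compiled */
const struct file_operations foo_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};
#endif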
2584 * try_to_release_page() - release old fs-specific metadata on a page
2586 * @page: the page which the kernel is trying to free
2587 * @gfp_mask: memory allocation flags (and I/O mode)
2589 * The address_space is asked to try to release any data held against the page
2590 * (presumably at page->private). If the release was successful, return `1'.
2591 * Otherwise return zero.
2593 * This may also be called if PG_fscache is set on a page, indicating that the
2594 * page is known to the local caching routines.
2596 * The @gfp_mask argument specifies whether I/O may be performed to release
2597 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2600 int try_to_release_page(struct page *page, gfp_t gfp_mask)
2602 struct address_space * const mapping = page->mapping;
2604 BUG_ON(!PageLocked(page));
2605 if (PageWriteback(page))
2606 return 0;
2608 if (mapping && mapping->a_ops->releasepage)
2609 return mapping->a_ops->releasepage(page, gfp_mask);
2610 return try_to_free_buffers(page);
2613 EXPORT_SYMBOL(try_to_release_page);