/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */
/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */
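/*
 * Illustrative example, not part of the original file: the ordering
 * above means a path that needs several of these locks must take them
 * top-down and release them in reverse, e.g.:
 *
 *	lock_page(page);				page->flags PG_locked
 *	spin_lock(&mapping->i_mmap_lock);		then i_mmap_lock
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	then the pte lock
 *	...
 *	pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 */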
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>
struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
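/*
 * Illustrative sketch, not part of the original file: the typical
 * caller pattern in a fault path -- make sure the vma has an anon_vma
 * before mapping a new anonymous page into it.  The helper name here
 * is invented for illustration.
 */
static inline int example_fault_prepare(struct vm_area_struct *vma)
{
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;	/* caller backs out and reports OOM */
	/* ... allocate the page, then page_add_new_anon_rmap() ... */
	return 0;
}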
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}
void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}
static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
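/*
 * Illustrative sketch, not part of the original file: the caller
 * pattern for the pair above, as used by page_referenced_anon() and
 * try_to_unmap_anon() below.  The helper name is invented.
 */
static inline void example_walk_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return;		/* page is no longer mapped anonymous */
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/* visit each vma which may map the page */
	}
	page_unlock_anon_vma(anon_vma);
}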
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		address = -EFAULT;
	}
	return address;
}
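/*
 * Worked example for the arithmetic above (illustrative, not from the
 * original file), assuming 4K pages so PAGE_CACHE_SHIFT == PAGE_SHIFT:
 * for a vma with vm_start = 0x40000000 and vm_pgoff = 0x10, a page
 * with page->index = 0x12 is expected at
 * 0x40000000 + ((0x12 - 0x10) << 12) = 0x40002000.
 */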
/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma;
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
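/*
 * Illustrative sketch, not part of the original file: how the callers
 * below consume page_check_address() -- on success the pte comes back
 * mapped and locked, and must be released with pte_unmap_unlock().
 * The helper name is invented.
 */
static inline int example_with_mapped_pte(struct page *page,
	struct mm_struct *mm, unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		return 0;	/* page not mapped at this address */
	/* examine or modify *pte under the pte lock here */
	pte_unmap_unlock(pte, ptl);
	return 1;
}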
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}
static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
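/*
 * Illustrative sketch, not part of the original file: a reclaim-style
 * caller of page_referenced().  The helper name and the keep-on-any-
 * reference policy are invented; the real policy lives in mm/vmscan.c.
 */
static inline int example_recently_referenced(struct page *page, int is_locked)
{
	/* nonzero if any pte (or the page's referenced flag) was young */
	return page_referenced(page, is_locked) != 0;
}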
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}
	if (page_test_dirty(page)) {
		page_clear_dirty(page);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
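/*
 * Illustrative sketch, not part of the original file: page_mkclean()
 * is for writeback paths about to clean a locked page -- it
 * write-protects and cleans every pte mapping the page, so a later
 * write must fault and re-dirty it.  The helper name is invented.
 */
static inline int example_clean_ptes_for_writeback(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return page_mkclean(page);	/* nonzero if anything was dirty */
}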
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
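/*
 * Illustrative sketch, not part of the original file: the low
 * PAGE_MAPPING_ANON bit set above tags page->mapping as an anon_vma
 * pointer rather than an address_space pointer; page_lock_anon_vma()
 * reverses the encoding essentially like this.  The helper name is
 * invented.
 */
static inline struct anon_vma *example_decode_anon_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long) page->mapping;

	if (!(mapping & PAGE_MAPPING_ANON))
		return NULL;	/* a real struct address_space pointer */
	return (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
}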
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
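/*
 * Illustrative note, not part of the original file: _mapcount is
 * biased to -1 so first/last mapping can be detected atomically:
 * atomic_inc_and_test() in page_add_anon_rmap() returns true exactly
 * when -1 becomes 0 (the first mapping), and atomic_add_negative(-1)
 * in page_remove_rmap() below returns true when 0 becomes -1 (the
 * last mapping went away).
 */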
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops)
				print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
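/*
 * Worked example for the macros above (illustrative, not from the
 * original file), assuming 4K pages and PMD_SIZE >= 128K:
 * CLUSTER_SIZE = 32 * 4K = 128K (0x20000), CLUSTER_MASK = ~0x1ffff.
 * With vm_start = 0x40000000 and cursor = 0x21000, the scan window is
 * (0x40000000 + 0x21000) & ~0x1ffff = 0x40020000 up to 0x40040000.
 */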
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}
static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}