#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
#ifdef CONFIG_KSM
	atomic_t ksm_refcount;
#endif
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};

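/*
 * Illustrative sketch (not part of this header): the scans in mm/rmap.c
 * walk this list with anon_vma->lock held; in this kernel each vma is
 * linked in through vma->anon_vma_node. vma_address() is a helper
 * internal to mm/rmap.c, shown here only to indicate the shape:
 *
 *	struct vm_area_struct *vma;
 *
 *	spin_lock(&anon_vma->lock);
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 *		unsigned long address = vma_address(page, vma);
 *		...	(visit the mapping of page at address)
 *	}
 *	spin_unlock(&anon_vma->lock);
 */
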
#ifdef CONFIG_MMU

#ifdef CONFIG_KSM
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
	atomic_set(&anon_vma->ksm_refcount, 0);
}

static inline int ksm_refcount(struct anon_vma *anon_vma)
{
	return atomic_read(&anon_vma->ksm_refcount);
}
#else
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int ksm_refcount(struct anon_vma *anon_vma)
{
	return 0;
}
#endif /* CONFIG_KSM */

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

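/*
 * Background sketch: for an anonymous page, page->mapping is not an
 * address_space pointer but the anon_vma pointer with PAGE_MAPPING_ANON
 * (the low bit) set; a KSM page additionally sets PAGE_MAPPING_KSM.
 * That is why the test above compares against PAGE_MAPPING_ANON exactly
 * rather than just testing the anon bit: a KSM page has no single
 * anon_vma to return. page_rmapping() strips the flag bits to recover
 * the pointer.
 */
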
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

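/*
 * Example (sketch): mm/ code brackets manipulation of the vma list with
 * these helpers, e.g. when vmas are split or merged, roughly:
 *
 *	anon_vma_lock(vma);
 *	...	(link or unlink vma->anon_vma_node)
 *	anon_vma_unlock(vma);
 *
 * The NULL check inside makes them safe on vmas that have never held
 * anonymous pages (vma->anon_vma not yet allocated).
 */
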
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);

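/*
 * Example (sketch, modeled on the anonymous-fault path in mm/memory.c):
 * a fault handler must allocate the anon_vma before it can map a new
 * anonymous page into the vma:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */
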
/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

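/*
 * Example (sketch, roughly as in fork's pte-copy loop in mm/memory.c):
 * when a pte is duplicated into the child, the mapcount is bumped with
 * page_dup_rmap() alongside the refcount; the rss bookkeeping shown is
 * an approximation of that caller, not part of this API:
 *
 *	get_page(page);
 *	page_dup_rmap(page);
 *	rss[PageAnon(page)]++;
 */
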
/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

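/*
 * Example (sketch, modeled on mm/vmscan.c): reclaim asks whether a page
 * was recently referenced, and learns which VM_ flags its mappings
 * carry, e.g. to keep executable pages on the active list longer:
 *
 *	unsigned long vm_flags;
 *
 *	if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
 *		if (vm_flags & VM_EXEC)
 *			...	(treat as hot, keep it active)
 *	}
 */
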
enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

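/*
 * Example (illustrative): the low byte selects one action, the higher
 * bits modify it; callers OR them together and rmap.c recovers the
 * action with TTU_ACTION():
 *
 *	ret = try_to_unmap(page, TTU_UNMAP | TTU_IGNORE_ACCESS);
 *
 *	if (TTU_ACTION(flags) == TTU_MUNLOCK)
 *		...	(inside the unmap loop: only checking mlock)
 */
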
int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);

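/*
 * Example (sketch, modeled on shrink_page_list() in mm/vmscan.c):
 * callers dispatch on the SWAP_* codes defined at the bottom of this
 * header:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		...	(page is unmapped, try to free it)
 *	}
 */
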
/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

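/*
 * Example (sketch): page_check_address() returns the mapped pte with
 * its page-table lock held; on success the caller must drop both the
 * kmap and the lock with pte_unmap_unlock():
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (pte) {
 *		...	(inspect or modify *pte)
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */
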
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

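/*
 * Example (sketch, roughly as in clear_page_dirty_for_io() in
 * mm/page-writeback.c): writeback write-protects a page before starting
 * I/O, so that a racing write faults and redirties it:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */
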
/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);

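/*
 * Example (sketch): a minimal rmap_walk() callback; "dump_one" is a
 * hypothetical name, not an existing kernel function. Returning
 * SWAP_AGAIN tells the walk to continue to the next mapping:
 *
 *	static int dump_one(struct page *page, struct vm_area_struct *vma,
 *			unsigned long address, void *arg)
 *	{
 *		printk(KERN_DEBUG "page %p mapped at %#lx\n", page, address);
 *		return SWAP_AGAIN;
 *	}
 *
 *	rmap_walk(page, dump_one, NULL);
 */
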
#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

#endif	/* _LINUX_RMAP_H */