/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to RCU, and it serializes
 * against the other mmu notifiers with RCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than
		 * wait for ->release to finish and then return.
		 */
		hlist_del_init_rcu(&mn->hlist);
		/*
		 * The RCU read lock here blocks mmu_notifier_unregister
		 * until ->release returns.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * If ->release runs before mmu_notifier_unregister it
		 * must be handled here, as it's the only way for the
		 * driver to flush all existing sptes and stop
		 * establishing any more sptes before all the pages in
		 * the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_rcu here prevents mmu_notifier_release from
	 * returning to exit_mmap (which would proceed freeing all pages
	 * in the mm) until the ->release method returns, if it was
	 * invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count pin is held by exit_mmap.
	 */
	synchronize_rcu();
}
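/*
 * Illustrative sketch (not part of this file): roughly what a driver's
 * ->release callback might do.  The names my_mmu, my_mmu_release and
 * my_mmu_drop_all_sptes are hypothetical; the only requirement imposed
 * here is that after ->release returns the driver holds no secondary-MMU
 * mappings (sptes) for this mm and will establish no new ones.
 */
#if 0
static void my_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_mmu *m = container_of(mn, struct my_mmu, notifier);

	/* Tear down every spte this device holds for the mm ... */
	my_mmu_drop_all_sptes(m);
	/* ... and make sure no new sptes can be created afterwards. */
	m->dead = true;
}
#endif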
/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	rcu_read_unlock();

	return young;
}
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
		/*
		 * Some drivers don't have change_pte,
		 * so we must call invalidate_page in that case.
		 */
		else if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	rcu_read_unlock();
}
void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	rcu_read_unlock();
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	rcu_read_unlock();
}
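/*
 * Illustrative sketch (not part of this file): callers in core mm are
 * expected to bracket any operation that tears down or rewrites page
 * table entries for a range with the start/end pair, roughly as below.
 * Only the two wrapper calls come from this API; the middle step stands
 * in for whatever pte manipulation the caller performs.
 */
#if 0
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... clear or modify the ptes for [start, end) and flush the TLB ... */
	mmu_notifier_invalidate_range_end(mm, start, end);
#endif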
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_cleanup;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either,
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_cleanup:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}
/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
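/*
 * Illustrative sketch (not part of this file): a driver registering a
 * notifier against its own mm.  The names my_mmu, my_mmu_ops, my_mmu_attach
 * and the callback identifiers are hypothetical; only the registration call
 * and the pinning rules in the comment above come from this API.
 */
#if 0
static const struct mmu_notifier_ops my_mmu_ops = {
	.release		= my_mmu_release,
	.invalidate_page	= my_mmu_invalidate_page,
	.invalidate_range_start	= my_mmu_invalidate_range_start,
	.invalidate_range_end	= my_mmu_invalidate_range_end,
};

static int my_mmu_attach(struct my_mmu *m)
{
	m->notifier.ops = &my_mmu_ops;
	/* current->mm keeps mm_users elevated, as required above. */
	return mmu_notifier_register(&m->notifier, current->mm);
}
#endif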
/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}
/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with RCU and against mmu_notifier_unregister
 * with the unregister lock + RCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		hlist_del_rcu(&mn->hlist);

		/*
		 * The RCU read lock here forces exit_mmap to wait for
		 * ->release to finish before freeing the pages.
		 */
		rcu_read_lock();
		spin_unlock(&mm->mmu_notifier_mm->lock);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		rcu_read_unlock();
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_rcu();

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
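/*
 * Illustrative sketch (not part of this file): the matching teardown for
 * the hypothetical registration sketched earlier.  Per the comment above
 * mmu_notifier_unregister, the driver must drop all of its sptes first;
 * my_mmu_detach and my_mmu_drop_all_sptes are hypothetical helpers.
 */
#if 0
static void my_mmu_detach(struct my_mmu *m, struct mm_struct *mm)
{
	my_mmu_drop_all_sptes(m);
	mmu_notifier_unregister(&m->notifier, mm);
	/* After this point none of my_mmu_ops' callbacks can still run. */
}
#endif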