/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
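
/*
 * Usage sketch (illustrative, not part of this file): a notifier's
 * ->release callback runs under SRCU and must not block, so a driver
 * that wants to free the structure embedding its mmu_notifier can
 * defer the kfree through mmu_notifier_call_srcu(). The names
 * "my_notifier", "my_notifier_free" and "my_release" are hypothetical.
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_notifier_free(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct my_notifier, rcu));
 *	}
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		struct my_notifier *p = container_of(mn, struct my_notifier, mn);
 *
 *		mmu_notifier_call_srcu(&p->rcu, my_notifier_free);
 *	}
 */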
void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
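
/*
 * Callback sketch (hypothetical): per the comment above, a secondary
 * MMU without a hardware young/accessed bit can implement
 * ->clear_flush_young by zapping its mappings for the range and
 * reporting whether any existed. "my_zap_range" is an assumed helper,
 * not a real kernel function.
 *
 *	static int my_clear_flush_young(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long start,
 *					unsigned long end)
 *	{
 *		return my_zap_range(mn, start, end); // 1 if mappings existed
 *	}
 */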
int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end() call-back
		 * when it already has invalidate_range(). Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 */
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
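
/*
 * Ops-table sketch (hypothetical handler names): as noted in
 * __mmu_notifier_invalidate_range_end(), a subsystem normally
 * implements either the invalidate_range_start()/end() pair or
 * invalidate_range() alone, since range_end already calls
 * invalidate_range() on its behalf.
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.clear_flush_young	= my_clear_flush_young,
 *		.invalidate_range	= my_invalidate_range,
 *	};
 */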
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either,
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
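
/*
 * Registration sketch (hypothetical caller, names assumed): when the
 * target mm is not current->mm, pin mm_users first, e.g. with
 * get_task_mm(), and drop that pin with mmput() once registration
 * returns; the mm_count reference taken internally keeps the mm
 * structure alive for a later mmu_notifier_unregister().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (!mm)
 *		return -ESRCH;
 *	my->mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&my->mn, mm);
 *	mmput(mm);
 *	return ret;
 */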
/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Cannot use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
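
/*
 * Teardown sketch (hypothetical names): once mmu_notifier_unregister()
 * returns, no notifier method can still be running, so the embedding
 * structure may be freed directly; the mm_count pin taken at
 * registration is dropped internally via mmdrop().
 *
 *	mmu_notifier_unregister(&my->mn, mm);
 *	kfree(my);
 */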
/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Cannot use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);