// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mmu_interval_notifier monitoring the VA.
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *       mn_tree_invalidate_start():           mmu_interval_read_begin():
	 *                                             spin_lock
	 *                                              seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                              seq == subs->invalidate_seq
	 *                                             spin_unlock
	 *        spin_lock
	 *         seq = ++subscriptions->invalidate_seq
	 *        spin_unlock
	 *             op->invalidate_range():
	 *                       user_lock
	 *                        mmu_interval_set_seq()
	 *                         interval_sub->invalidate_seq = seq
	 *                       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *       mn_itree_inv_end():
	 *        spin_lock
	 *         seq = ++subscriptions->invalidate_seq
	 *        spin_unlock
	 *
	 *                                             user_lock
	 *                                              mmu_interval_read_retry():
	 *                                               interval_sub->invalidate_seq != seq
	 *                                             user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
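
/*
 * Illustrative sketch only, not part of this file: how a driver is expected
 * to pair mmu_interval_read_begin()/mmu_interval_read_retry() with its own
 * 'user_lock' as described above.  The struct my_mirror and my_mirror_fault()
 * names below are hypothetical stand-ins for driver code.
 */
#if 0	/* example only, not compiled */
struct my_mirror {
	struct mmu_interval_notifier notifier;
	struct mutex lock;	/* the 'user_lock' described above */
};

static int my_mirror_fault(struct my_mirror *mirror)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&mirror->notifier);

	/* Blocking work (e.g. get_user_pages()) may be done here. */

	mutex_lock(&mirror->lock);
	if (mmu_interval_read_retry(&mirror->notifier, seq)) {
		/* An invalidation collided; discard the work and retry. */
		mutex_unlock(&mirror->lock);
		goto again;
	}
	/* Safe to establish SPTEs while holding the user_lock. */
	mutex_unlock(&mirror->lock);
	return 0;
}
#endif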

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site uses
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}
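
/*
 * Illustrative sketch only, not part of this file: a driver-side
 * ->invalidate_range() hook of the kind iterated above.  'my_invalidate_range'
 * is a hypothetical name; the signature is the real mmu_notifier_ops hook.
 */
#if 0	/* example only, not compiled */
static void my_invalidate_range(struct mmu_notifier *subscription,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/*
	 * The secondary TLB mirrors the CPU page tables, so any CPU TLB
	 * flush of [start, end) must also flush the device TLB here.
	 */
}
#endif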

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers.  acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a mmu_notifier get, the subscription->mm pointer
 * will remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
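
/*
 * Illustrative sketch only, not part of this file: registering a classic
 * hlist based notifier against current->mm.  'struct my_ctx', my_release()
 * and my_invalidate_range_start() are hypothetical driver code; the ops
 * members used are the real struct mmu_notifier_ops hooks.
 */
#if 0	/* example only, not compiled */
struct my_ctx {
	struct mmu_notifier notifier;
};

static void my_release(struct mmu_notifier *subscription, struct mm_struct *mm)
{
	/* Flush all secondary mappings/SPTEs; the mm is about to be freed. */
}

static int my_invalidate_range_start(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range)
{
	/* Stop using pages in range->start .. range->end. */
	return 0;
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.release		= my_release,
	.invalidate_range_start	= my_invalidate_range_start,
};

static int my_ctx_attach(struct my_ctx *ctx)
{
	ctx->notifier.ops = &my_notifier_ops;
	/* Takes the mmap_lock internally; mm_users must be held by the caller. */
	return mmu_notifier_register(&ctx->notifier, current->mm);
}
#endif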

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller holds a mmu_notifier get, the mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
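
/*
 * Illustrative sketch only, not part of this file: the mmu_notifier_get()/
 * mmu_notifier_put() lifetime, where one notifier per (ops, mm) pair is
 * shared by refcount.  'struct my_notifier' and the my_*() helpers are
 * hypothetical; alloc_notifier/free_notifier are the real ops hooks used by
 * this flow.
 */
#if 0	/* example only, not compiled */
struct my_notifier {
	struct mmu_notifier subscription;	/* must be embedded */
};

static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
{
	struct my_notifier *mn = kzalloc(sizeof(*mn), GFP_KERNEL);

	return mn ? &mn->subscription : ERR_PTR(-ENOMEM);
}

static void my_free_notifier(struct mmu_notifier *subscription)
{
	kfree(container_of(subscription, struct my_notifier, subscription));
}

static const struct mmu_notifier_ops my_ops = {
	.alloc_notifier	= my_alloc_notifier,
	.free_notifier	= my_free_notifier,
};

static struct mmu_notifier *my_get(struct mm_struct *mm)
{
	/* Wraps mmu_notifier_get_locked() with the mmap_lock held. */
	return mmu_notifier_get(&my_ops, mm);
}

/* Each successful my_get() is later balanced by mmu_notifier_put(). */
#endif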

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_lock);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
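
/*
 * Illustrative sketch only, not part of this file: a minimal
 * mmu_interval_notifier user.  'struct my_range', my_range_invalidate() and
 * my_range_track() are hypothetical; the invalidate callback must update its
 * seq via mmu_interval_set_seq() under the driver's own lock before
 * returning true.
 */
#if 0	/* example only, not compiled */
struct my_range {
	struct mmu_interval_notifier notifier;
	struct mutex lock;
};

static bool my_range_invalidate(struct mmu_interval_notifier *interval_sub,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{
	struct my_range *r = container_of(interval_sub, struct my_range,
					  notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;	/* ask the caller to retry in blockable context */

	mutex_lock(&r->lock);
	mmu_interval_set_seq(interval_sub, cur_seq);
	/* Tear down any SPTEs covering range->start .. range->end here. */
	mutex_unlock(&r->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_range_ops = {
	.invalidate = my_range_invalidate,
};

static int my_range_track(struct my_range *r, struct mm_struct *mm,
			  unsigned long start, unsigned long length)
{
	mutex_init(&r->lock);
	return mmu_interval_notifier_insert(&r->notifier, mm, start, length,
					    &my_range_ops);
}
#endif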

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
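
/*
 * Illustrative sketch only, not part of this file: modules that drop their
 * notifiers with mmu_notifier_put() flush the asynchronous SRCU free work in
 * their __exit routine, as described above.  'my_driver_exit' is a
 * hypothetical name.
 */
#if 0	/* example only, not compiled */
static void __exit my_driver_exit(void)
{
	/* All mmu_notifier_put() calls were issued before this point. */
	mmu_notifier_synchronize();
}
module_exit(my_driver_exit);
#endif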

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;

	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);