#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
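
/*
 * MMAP_LOCK_INITIALIZER() supplies the .mmap_lock member initializer for
 * statically-defined mm_structs. Illustrative sketch of a typical user
 * (outside this header, e.g. the init_mm definition in mm/init-mm.c):
 *
 *	struct mm_struct init_mm = {
 *		...
 *		MMAP_LOCK_INITIALIZER(init_mm)
 *		...
 *	};
 */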

#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	if (tracepoint_enabled(mmap_lock_start_locking))
		__mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						       bool write, bool success)
{
	if (tracepoint_enabled(mmap_lock_acquire_returned))
		__mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (tracepoint_enabled(mmap_lock_released))
		__mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						       bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

static inline void mmap_assert_locked(const struct mm_struct *mm)
{
	rwsem_assert_held(&mm->mmap_lock);
}

static inline void mmap_assert_write_locked(const struct mm_struct *mm)
{
	rwsem_assert_held_write(&mm->mmap_lock);
}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * Drop all currently-held per-VMA locks.
 * This is called from the mmap_lock implementation directly before releasing
 * a write-locked mmap_lock (or downgrading it to read-locked).
 * This should normally NOT be called manually from other places.
 * If you want to call this manually anyway, keep in mind that this will release
 * *all* VMA write locks, including ones from further up the stack.
 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/*
	 * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
	 * mmap_lock being held.
	 * We need RELEASE semantics here to ensure that preceding stores into
	 * the VMA take effect before we unlock it with this store.
	 * Pairs with ACQUIRE semantics in vma_start_read().
	 */
	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
#endif

static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
}

static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
	return ret;
}
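
/*
 * Illustrative sketch of a typical write-side critical section using the
 * killable variant, which returns 0 on success and -EINTR if a fatal signal
 * arrives while sleeping on the lock:
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	...modify the VMA tree under the write lock...
 *	mmap_write_unlock(mm);
 */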

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}
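
/*
 * Illustrative sketch of the downgrade pattern: take the lock for writing,
 * perform the updates that need exclusion, then atomically convert the lock
 * to read mode so other readers can proceed while the caller finishes its
 * read-only work. The lock must then be dropped with mmap_read_unlock():
 *
 *	mmap_write_lock(mm);
 *	...updates that require the write lock...
 *	mmap_write_downgrade(mm);
 *	...read-only work under the read lock...
 *	mmap_read_unlock(mm);
 */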

static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
	return ret;
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	bool ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, false, ret);
	return ret;
}
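
/*
 * Illustrative sketch of the trylock pattern: mmap_read_trylock() returns
 * true only if the read lock was acquired without sleeping, so callers that
 * must not block fall back to a slower path on failure (the -EAGAIN below
 * is just an example error code):
 *
 *	if (!mmap_read_trylock(mm))
 *		return -EAGAIN;
 *	...read-only walk of the VMAs...
 *	mmap_read_unlock(mm);
 */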

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}

static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}

static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}
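
/*
 * Illustrative sketch of lock breaking: long-running loops that hold the
 * lock in read mode can check mmap_lock_is_contended() and voluntarily drop
 * and re-take the lock so queued writers are not starved. Any state derived
 * from the VMAs must be revalidated after re-acquiring:
 *
 *	if (mmap_lock_is_contended(mm)) {
 *		mmap_read_unlock(mm);
 *		cond_resched();
 *		mmap_read_lock(mm);
 *	}
 */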

#endif /* _LINUX_MMAP_LOCK_H */