/* include/linux/page_ref.h (Linux 4.13.16) */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;
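
/*
 * Overview: the __page_ref_* hooks below feed the tracepoints declared
 * above. With CONFIG_DEBUG_PAGE_REF disabled they are empty stubs and
 * page_ref_tracepoint_active() is constant false, so every helper in
 * this file compiles down to a bare atomic operation.
 */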

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}
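
/*
 * page_ref_count() reads the refcount of the page it is handed, while
 * page_count() resolves compound_head() first, so for a tail page it
 * reports the head page's refcount. An illustrative sketch of the
 * resulting invariant (hypothetical caller, not code from this file):
 *
 *	struct page *head = compound_head(page);
 *	VM_BUG_ON(page_count(page) != page_ref_count(head));
 */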

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}
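
/*
 * A typical get/put pairing built on these helpers (an illustrative
 * sketch, not code from this file; release_page_resources() is a
 * hypothetical caller-side release path):
 *
 *	page_ref_inc(page);
 *	...
 *	if (page_ref_dec_and_test(page))
 *		release_page_resources(page);
 *
 * page_ref_dec_and_test() returns true only for the caller that drops
 * the final reference, so exactly one path performs the cleanup.
 */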

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}
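
/*
 * page_ref_add_unless() is the building block for speculative
 * references that must fail on a page whose refcount has already
 * dropped to zero, i.e. a page on its way to being freed. An
 * illustrative sketch (not code from this file):
 *
 *	if (!page_ref_add_unless(page, 1, 0))
 *		return NULL;
 *
 * The atomic_add_unless() underneath performs the zero check and the
 * increment as a single atomic step.
 */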

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}
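
/*
 * page_ref_freeze() and page_ref_unfreeze() bracket work that needs
 * exclusive ownership of a page, e.g. splitting or migrating it. An
 * illustrative sketch, assuming the caller knows the page currently
 * holds exactly @expected references:
 *
 *	if (page_ref_freeze(page, expected)) {
 *		...exclusive work on the page...
 *		page_ref_unfreeze(page, expected);
 *	}
 *
 * The freeze succeeds only if the refcount is exactly @expected, in
 * which case it is atomically replaced with 0, so concurrent
 * speculative lookups (page_ref_add_unless() against 0) cannot take a
 * new reference while the page is frozen.
 */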

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	smp_mb();
	atomic_set(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif