/* SPDX-License-Identifier: GPL-2.0 */
/*
 * page allocation tagging
 */
#ifndef _LINUX_PGALLOC_TAG_H
#define _LINUX_PGALLOC_TAG_H

#include <linux/alloc_tag.h>

#ifdef CONFIG_MEM_ALLOC_PROFILING

#include <linux/page_ext.h>

extern struct page_ext_operations page_alloc_tagging_ops;
extern unsigned long alloc_tag_ref_mask;
extern int alloc_tag_ref_offs;
extern struct alloc_tag_kernel_section kernel_tags;

DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed);

typedef u16 pgalloc_tag_idx;

union pgtag_ref_handle {
	union codetag_ref *ref;	/* reference in page extension */
	struct page *page;	/* reference in page flags */
};

/* Reserved indexes */
#define CODETAG_ID_NULL		0
#define CODETAG_ID_EMPTY	1
#define CODETAG_ID_FIRST	2

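/*
 * With compressed references (mem_profiling_compressed), a tag is stored in
 * page->flags as a pgalloc_tag_idx with the layout implemented by
 * idx_to_ref()/ref_to_idx() below:
 *
 *	CODETAG_ID_NULL					no tag recorded
 *	CODETAG_ID_EMPTY				reference explicitly cleared
 *	CODETAG_ID_FIRST .. FIRST + kernel_tags.count - 1
 *							tags built into the kernel image
 *	anything above that				module tags (see module_idx_to_tag())
 */
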
#ifdef CONFIG_MODULES

extern struct alloc_tag_module_section module_tags;

static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
	return &module_tags.first_tag[idx - kernel_tags.count];
}

static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
	return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag);
}

#else /* CONFIG_MODULES */

static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
	pr_warn("invalid page tag reference %lu\n", (unsigned long)idx);
	return NULL;
}

static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
	pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag);
	return CODETAG_ID_NULL;
}

#endif /* CONFIG_MODULES */

static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref)
{
	switch (idx) {
	case (CODETAG_ID_NULL):
		ref->ct = NULL;
		break;
	case (CODETAG_ID_EMPTY):
		set_codetag_empty(ref);
		break;
	default:
		idx -= CODETAG_ID_FIRST;
		ref->ct = idx < kernel_tags.count ?
			&kernel_tags.first_tag[idx].ct :
			&module_idx_to_tag(idx)->ct;
		break;
	}
}

static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref)
{
	struct alloc_tag *tag;

	if (!ref->ct)
		return CODETAG_ID_NULL;

	if (is_codetag_empty(ref))
		return CODETAG_ID_EMPTY;

	tag = ct_to_alloc_tag(ref->ct);
	if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count)
		return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag);

	return module_tag_to_idx(tag);
}

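/*
 * The three helpers below are used together: get_page_tag_ref() fills @ref
 * with a snapshot of the page's tag reference and records where it came from
 * in @handle, update_page_tag_ref() writes a modified snapshot back, and
 * put_page_tag_ref() releases the handle (dropping the page_ext reference in
 * the uncompressed case). clear_page_tag_ref() and pgalloc_tag_add()/
 * pgalloc_tag_sub() below show the canonical get/update/put sequence.
 */
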
/* Should be called only if mem_alloc_profiling_enabled() */
static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
				    union pgtag_ref_handle *handle)
{
	if (!page)
		return false;

	if (static_key_enabled(&mem_profiling_compressed)) {
		pgalloc_tag_idx idx;

		idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
		idx_to_ref(idx, ref);
		handle->page = page;
	} else {
		struct page_ext *page_ext;
		union codetag_ref *tmp;

		page_ext = page_ext_get(page);
		if (!page_ext)
			return false;

		tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
		ref->ct = tmp->ct;	/* copy out the reference */
		handle->ref = tmp;	/* remember where to write it back */
	}

	return true;
}

static inline void put_page_tag_ref(union pgtag_ref_handle handle)
{
	if (WARN_ON(!handle.ref))
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset);
}

static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref)
{
	if (static_key_enabled(&mem_profiling_compressed)) {
		struct page *page = handle.page;
		unsigned long old_flags;
		unsigned long flags;
		unsigned long idx;

		if (WARN_ON(!page || !ref))
			return;

		idx = (unsigned long)ref_to_idx(ref);
		idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
		/* Replace only the tag bits, preserving concurrent flag updates. */
		do {
			old_flags = READ_ONCE(page->flags);
			flags = old_flags;
			flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
			flags |= idx;
		} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
	} else {
		if (WARN_ON(!handle.ref || !ref))
			return;

		handle.ref->ct = ref->ct;
	}
}

static inline void clear_page_tag_ref(struct page *page)
{
	if (mem_alloc_profiling_enabled()) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(page, &ref, &handle)) {
			set_codetag_empty(&ref);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}

static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
				   unsigned int nr)
{
	if (mem_alloc_profiling_enabled()) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(page, &ref, &handle)) {
			alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}

static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
{
	if (mem_alloc_profiling_enabled()) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(page, &ref, &handle)) {
			alloc_tag_sub(&ref, PAGE_SIZE * nr);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}

static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
{
	struct alloc_tag *tag = NULL;

	if (mem_alloc_profiling_enabled()) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(page, &ref, &handle)) {
			alloc_tag_sub_check(&ref);
			if (ref.ct)
				tag = ct_to_alloc_tag(ref.ct);
			put_page_tag_ref(handle);
		}
	}

	return tag;
}

static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
{
	if (mem_alloc_profiling_enabled() && tag)
		this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
}

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
void pgalloc_tag_copy(struct folio *new, struct folio *old);

void __init alloc_tag_sec_init(void);

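/*
 * Illustrative sketch of how the helpers above pair up at allocator call
 * sites (the real call sites are in the page allocator, not in this header;
 * "order" is just an example variable here):
 *
 *	// account the pages against the task's current allocation tag
 *	pgalloc_tag_add(page, current, 1 << order);
 *	...
 *	// uncharge when the pages are freed
 *	pgalloc_tag_sub(page, 1 << order);
 *
 * pgalloc_tag_split() and pgalloc_tag_copy() keep the recorded tags
 * consistent when a folio is split into smaller orders or its contents are
 * migrated to a new folio.
 */
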
#else /* CONFIG_MEM_ALLOC_PROFILING */

static inline void clear_page_tag_ref(struct page *page) {}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
				   unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
static inline void alloc_tag_sec_init(void) {}
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#endif /* _LINUX_PGALLOC_TAG_H */