/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>
struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an array
 * of these. The embedded codetag uses the codetag framework.
 */
struct alloc_tag {
	struct codetag ct;
	struct alloc_tag_counters __percpu *counters;
} __aligned(8);
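/*
 * Illustrative sketch (not part of this API): because each callsite emits one
 * struct alloc_tag into ALLOC_TAG_SECTION_NAME, the section can be walked as
 * a plain array. The linker-provided bound symbols below are assumptions used
 * only for illustration:
 *
 *	extern struct alloc_tag __start_alloc_tags[], __stop_alloc_tags[];
 *	struct alloc_tag *tag;
 *
 *	for (tag = __start_alloc_tags; tag < __stop_alloc_tags; tag++)
 *		inspect_tag(tag);	// hypothetical consumer
 */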
struct alloc_tag_kernel_section {
	struct alloc_tag *first_tag;
	unsigned long count;
};

struct alloc_tag_module_section {
	union {
		unsigned long start_addr;
		struct alloc_tag *first_tag;
	};
	unsigned long end_addr;
	/* used size */
	unsigned long size;
};
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
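/*
 * Usage sketch (illustration only): CODETAG_EMPTY marks a reference as
 * intentionally untagged so the debug checks below will not warn about it.
 *
 *	union codetag_ref ref = { 0 };
 *
 *	set_codetag_empty(&ref);		// deliberately carries no tag
 *	WARN_ON(!is_codetag_empty(&ref));	// true under ..._DEBUG
 */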
#ifdef CONFIG_MEM_ALLOC_PROFILING

#define ALLOC_TAG_SECTION_NAME	"alloc_tags"

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}
#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for
 * DECLARE_PER_CPU_SECTION). Instead, account all module allocations to a
 * single shared counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section(ALLOC_TAG_SECTION_NAME) = {				\
		.ct = CODE_TAG_INIT,					\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section(ALLOC_TAG_SECTION_NAME) = {				\
		.ct = CODE_TAG_INIT,					\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */
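/*
 * Expansion sketch (illustration only; DEFINE_ALLOC_TAG() is normally emitted
 * through alloc_hooks() below rather than used directly):
 *
 *	DEFINE_ALLOC_TAG(_alloc_tag);
 *
 * creates one static struct alloc_tag in the "alloc_tags" section for the
 * callsite, backed by its own per-CPU counters or, with
 * ARCH_NEEDS_WEAK_PER_CPU, by the shared _shared_alloc_tag counter.
 */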
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif
/* Caller should verify both ref and tag to be valid */
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return false;

	ref->ct = &tag->ct;
	return true;
}

static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
		return false;

	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference to a sub-allocation must bump the call counter
	 * because the counter is decremented when each part is freed.
	 */
	this_cpu_inc(tag->counters->calls);
	return true;
}
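/*
 * Sketch (illustration only): when a large allocation is split, every part
 * takes its own reference to the same tag, so the call counter rises once per
 * part and each later free decrements it exactly once. The part[] array and
 * nr_parts below are assumptions:
 *
 *	for (i = 0; i < nr_parts; i++)
 *		alloc_tag_ref_set(&part[i].ref, tag);	// calls += nr_parts
 */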
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	if (likely(alloc_tag_ref_set(ref, tag)))
		this_cpu_add(tag->counters->bytes, bytes);
}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
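/*
 * Pairing sketch (illustration only; the real hooks live in the allocators,
 * and the obj->ref field here is an assumption):
 *
 *	alloc_tag_add(&obj->ref, current->alloc_tag, size);	// on allocation
 *	...
 *	alloc_tag_sub(&obj->ref, size);				// on free
 */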
#define alloc_tag_record(p)	((p) = current->alloc_tag)
#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */
#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	typeof(_do_alloc) _res;						\
	if (mem_alloc_profiling_enabled()) {				\
		struct alloc_tag * __maybe_unused _old;			\
		_old = alloc_tag_save(_tag);				\
		_res = _do_alloc;					\
		alloc_tag_restore(_tag, _old);				\
	} else								\
		_res = _do_alloc;					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
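/*
 * Typical usage sketch (foo_alloc_noprof() is a hypothetical allocator
 * variant that performs the allocation without accounting itself):
 *
 *	#define foo_alloc(size)	alloc_hooks(foo_alloc_noprof(size))
 *
 * Every foo_alloc() callsite then gets its own struct alloc_tag, and the
 * allocation is charged to that callsite whenever profiling is enabled.
 */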
#endif /* _LINUX_ALLOC_TAG_H */