/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

struct callchain_cpus_entries {
        struct rcu_head                 rcu_head;
        struct perf_callchain_entry    *cpu_entries[0];
};
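
/*
 * These two limits are exposed as the kernel.perf_event_max_stack and
 * kernel.perf_event_max_contexts_per_stack sysctls.
 */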
int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
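
/*
 * A callchain entry is sized for the current sysctl limits: room for
 * sysctl_perf_event_max_stack addresses plus
 * sysctl_perf_event_max_contexts_per_stack context markers.
 */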
static inline size_t perf_callchain_entry__sizeof(void)
{
        return (sizeof(struct perf_callchain_entry) +
                sizeof(__u64) * (sysctl_perf_event_max_stack +
                                 sysctl_perf_event_max_contexts_per_stack));
}
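
/*
 * One recursion counter per context (task, softirq, hardirq, NMI) keeps
 * nested callchain sampling on the same CPU from reusing a live entry.
 */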
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
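
/*
 * Weak stubs: architectures with callchain support provide their own
 * kernel and user space unwinders.
 */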
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                struct pt_regs *regs)
{
}
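
/*
 * RCU callback: free each CPU's entry buffer, then the container that
 * holds the per cpu pointers.
 */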
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
        struct callchain_cpus_entries *entries;
        int cpu;

        entries = container_of(head, struct callchain_cpus_entries, rcu_head);

        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);

        kfree(entries);
}
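
/*
 * Called with callchain_mutex held; the actual freeing is deferred to
 * an RCU grace period so in-flight (including NMI) users can finish
 * with the old buffers.
 */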
static void release_callchain_buffers(void)
{
        struct callchain_cpus_entries *entries;

        entries = callchain_cpus_entries;
        RCU_INIT_POINTER(callchain_cpus_entries, NULL);
        call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
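
/*
 * Allocate one entry buffer per possible CPU, each large enough for
 * PERF_NR_CONTEXTS nesting levels, and publish the result with RCU.
 */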
static int alloc_callchain_buffers(void)
{
        int cpu;
        int size;
        struct callchain_cpus_entries *entries;

        /*
         * We can't use the percpu allocation API for data that can be
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
        size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

        for_each_possible_cpu(cpu) {
                entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                                         cpu_to_node(cpu));
                if (!entries->cpu_entries[cpu])
                        goto fail;
        }

        rcu_assign_pointer(callchain_cpus_entries, entries);

        return 0;

fail:
        for_each_possible_cpu(cpu)
                kfree(entries->cpu_entries[cpu]);
        kfree(entries);

        return -ENOMEM;
}
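
/*
 * Take a reference on the shared callchain buffers, allocating them on
 * first use. Returns -EOVERFLOW when the event asks for a deeper stack
 * than the global sysctl cap allows.
 */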
int get_callchain_buffers(int event_max_stack)
{
        int err = 0;
        int count;

        mutex_lock(&callchain_mutex);

        count = atomic_inc_return(&nr_callchain_events);
        if (WARN_ON_ONCE(count < 1)) {
                err = -EINVAL;
                goto exit;
        }

        if (count > 1) {
                /* If the allocation failed, give up */
                if (!callchain_cpus_entries)
                        err = -ENOMEM;
                /*
                 * If requesting per event more than the global cap,
                 * return a different error to help userspace figure
                 * this out.
                 *
                 * And also do it here so that we have &callchain_mutex held.
                 */
                if (event_max_stack > sysctl_perf_event_max_stack)
                        err = -EOVERFLOW;
                goto exit;
        }

        err = alloc_callchain_buffers();
exit:
        if (err)
                atomic_dec(&nr_callchain_events);

        mutex_unlock(&callchain_mutex);

        return err;
}
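
/*
 * Drop a reference; the last user releases the buffers under the mutex.
 */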
void put_callchain_buffers(void)
{
        if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
                release_callchain_buffers();
                mutex_unlock(&callchain_mutex);
        }
}
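
/*
 * Claim this CPU's entry slot for the current recursion context. The
 * slot must be released with put_callchain_entry() before returning to
 * a context that could sample again.
 */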
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
        int cpu;
        struct callchain_cpus_entries *entries;

        *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
        if (*rctx == -1)
                return NULL;

        entries = rcu_dereference(callchain_cpus_entries);
        if (!entries)
                return NULL;

        cpu = smp_processor_id();

        return (((void *)entries->cpu_entries[cpu]) +
                (*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
        put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
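
/*
 * Resolve a callchain for @event, honouring its exclude_callchain_*
 * attributes and its per event stack depth limit.
 */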
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
        bool kernel = !event->attr.exclude_callchain_kernel;
        bool user   = !event->attr.exclude_callchain_user;
        /* Disallow cross-task user callchains. */
        bool crosstask = event->ctx->task && event->ctx->task != current;
        const u32 max_stack = event->attr.sample_max_stack;

        if (!kernel && !user)
                return NULL;

        return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
}
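
/*
 * Walk the kernel and/or user stack into a per cpu entry. When
 * @add_mark is set, PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers are
 * stored ahead of the respective frames.
 */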
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark)
{
        struct perf_callchain_entry *entry;
        struct perf_callchain_entry_ctx ctx;
        int rctx;

        entry = get_callchain_entry(&rctx);
        if (rctx == -1)
                return NULL;

        if (!entry)
                goto exit_put;

        ctx.entry          = entry;
        ctx.max_stack      = max_stack;
        ctx.nr             = entry->nr = init_nr;
        ctx.contexts       = 0;
        ctx.contexts_maxed = false;

        if (kernel && !user_mode(regs)) {
                if (add_mark)
                        perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
                perf_callchain_kernel(&ctx, regs);
        }

        if (user) {
                if (!user_mode(regs)) {
                        if (current->mm)
                                regs = task_pt_regs(current);
                        else
                                regs = NULL;
                }

                if (regs) {
                        if (crosstask)
                                goto exit_put;

                        if (add_mark)
                                perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
                        perf_callchain_user(&ctx, regs);
                }
        }

exit_put:
        put_callchain_entry(rctx);

        return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int *value = table->data;
        int new_value = *value, ret;
        struct ctl_table new_table = *table;

        new_table.data = &new_value;
        ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        /* Reject the write while any event still uses the buffers. */
        mutex_lock(&callchain_mutex);
        if (atomic_read(&nr_callchain_events))
                ret = -EBUSY;
        else
                *value = new_value;

        mutex_unlock(&callchain_mutex);

        return ret;
}