/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
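
/*
 * Decide whether this perf event may use this trace event at all:
 * function-trace events and raw tracepoint samples are restricted to
 * privileged users unless the event is explicitly marked CAP_ANY.
 */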
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event) &&
	    perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
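
/*
 * First-time registration of a trace event for perf: allocate the
 * per-CPU hlist of attached perf events and, if this is the first
 * trace event in use system-wide, the per-context scratch buffers.
 * Subsequent callers just bump the refcount.
 */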
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
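
/*
 * Drop one reference on the trace event; on the last one, unregister
 * the perf callback, wait for in-flight probes to finish, and free the
 * per-CPU lists (and the shared buffers once no trace event needs them).
 */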
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
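
/*
 * Full setup sequence for one perf event: permission check, then
 * (ref-counted) registration, then the per-event open callback.
 * On open failure the registration is rolled back.
 */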
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret)
		perf_trace_event_unreg(p_event);

	return ret;
}
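
/*
 * Entry point from the perf core: look up the trace event whose id
 * matches attr.config and bind the perf event to it. The owning
 * module is pinned for the lifetime of the binding.
 */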
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
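
/*
 * pmu::add callback: queue the event on this CPU's hlist so the
 * tracepoint probe finds it, honouring PERF_EF_START.
 */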
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
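
/*
 * Grab a recursion-context-local scratch buffer and initialize the
 * common trace_entry header in it. Returns NULL (without taking a
 * recursion context) if we are already recursing in this context.
 */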
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
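
/*
 * ftrace callback used when a perf event attaches to the function
 * tracer: build a TRACE_FN sample in the perf scratch buffer and
 * push it out through perf_trace_buf_submit().
 */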
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;

	head = this_cpu_ptr(event_function.perf_events);
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}
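
/*
 * Helpers that wire a perf event's private ftrace_ops into the
 * function tracer and tear it down again.
 */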
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);

	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}
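
/*
 * TRACE_REG_* dispatcher for the function-trace pseudo event: regular
 * ftrace registration is refused, and the perf open/close/add/del
 * operations map onto the helpers above.
 */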
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		return -ENODEV;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */