// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

/*
 * Scratch buffers for building perf sample payloads, one per perf
 * recursion context (task, softirq, hardirq, NMI) so nested events
 * cannot clobber each other's records.
 */
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
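
/*
 * Gate perf access to a trace event.  Called early in the
 * perf_event_open() path, before any per-event state is allocated;
 * returns 0 to allow the event or a negative errno to reject it.
 */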
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed to create parent,
	 * allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while tracing
		 * the page fault handler and its overall tricky nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
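
/*
 * Refcounted registration: the first perf user of a given trace event
 * allocates the per-CPU hlist that perf_trace_add() queues events on,
 * and the first perf user overall allocates the per-context scratch
 * buffers.  The failure path unwinds both in reverse order.
 */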
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
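
/*
 * Mirror of perf_trace_event_reg(): the last perf user of the trace
 * event unregisters the callback and frees the per-CPU hlist, and the
 * last perf user overall frees the scratch buffers.
 */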
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
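
/*
 * TRACE_REG_PERF_OPEN/CLOSE give the event class a per-event hook.
 * Most classes ignore these, but the function trace class uses them to
 * register and unregister a dedicated ftrace_ops (see
 * perf_ftrace_event_register() at the bottom of this file).
 */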
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
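
/*
 * Common initialization: permission check first, then the refcounted
 * registration, then the per-event open hook.  A failing open hook
 * unwinds the registration so the refcounts stay balanced.
 */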
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
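
/*
 * Entry point from perf_event_open() for PERF_TYPE_TRACEPOINT events:
 * attr.config carries the trace event id, i.e. the value exposed in
 * each event's "id" file under the tracefs events directory.
 */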
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
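
/*
 * The kprobe and uprobe variants below create "local" trace events on
 * the fly: the probe exists only for the lifetime of the perf event
 * and is never visible under tracefs.
 */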
#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */
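
/*
 * For reference, a minimal user-space sketch of how perf_kprobe_init()
 * is reached (illustrative only: the PMU type value must be read from
 * /sys/bus/event_source/devices/kprobe/type at runtime, and the symbol
 * name is just an example):
 *
 *	struct perf_event_attr attr = {};
 *
 *	attr.size = sizeof(attr);
 *	attr.type = kprobe_pmu_type;	// from the sysfs file above
 *	attr.kprobe_func = (__u64)(unsigned long)"do_sys_open";
 *	attr.probe_offset = 0;
 *	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 */
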
#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;
	path = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!path)
		return -ENOMEM;
	ret = strncpy_from_user(
		path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
	if (ret == PATH_MAX) {
		ret = -E2BIG;
		goto out;
	}
	if (ret < 0)
		goto out;
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(
		path, p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */
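
/*
 * perf_trace_add()/perf_trace_del() are wired up as the tracepoint
 * PMU's ->add() and ->del() callbacks: they run each time the perf
 * event is scheduled onto or off a CPU.
 */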
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was
	 * performed and we need to take the default action of enqueueing
	 * our event on the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was
	 * performed and we need to take the default action of dequeueing
	 * our event from the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}
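
/*
 * Hand out a per-CPU scratch buffer for building a raw sample record.
 * On success, *rctxp holds the recursion context that must later be
 * released, normally by perf_trace_buf_submit(), or manually via
 * perf_swevent_put_recursion_context() if the record is dropped.
 */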
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from the alignment to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
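
/*
 * Stamp the common trace_entry header (type, irq flags, preempt count)
 * onto a record built in the scratch buffer before it is handed to
 * perf.
 */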
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
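
/*
 * Function-trace support: each perf event owns an ftrace_ops whose
 * ->private caches the CPU the event is currently scheduled on.  While
 * the event is inactive, ->private holds nr_cpu_ids, which matches no
 * CPU, so the callback below filters itself out cheaply.
 */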
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);

	ftrace_free_filter(ops);
	return ret;
}
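
/*
 * The ->reg() callback of the function trace event class: it maps the
 * generic TRACE_REG_PERF_* requests onto the ftrace_ops handling
 * above.  Returning 1 for ADD/DEL tells perf_trace_add() and
 * perf_trace_del() that a custom action was taken, so the default
 * per-CPU hlist queueing is skipped.
 */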
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		return -EINVAL;

	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;

	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */