// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"
/* one raw-data buffer per perf recursion context (task, softirq, hardirq, NMI) */
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);

		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed creating the parent,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}
static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
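/*
 * Note (informational): tp_event->class->reg() used above is the event
 * class's registration callback, invoked with one of the TRACE_REG_PERF_*
 * commands (REGISTER/UNREGISTER/OPEN/CLOSE/ADD/DEL). For the function trace
 * event that callback is perf_ftrace_event_register() at the bottom of this
 * file.
 */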
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
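/*
 * Usage sketch (illustrative only; the probed function name and the PMU type
 * value are placeholders): the init/destroy pairs below back the dynamic
 * "kprobe" and "uprobe" PMUs. A kprobe perf event is typically requested from
 * userspace roughly like this, with the PMU type read from
 * /sys/bus/event_source/devices/kprobe/type and error handling omitted:
 *
 *	struct perf_event_attr attr = {
 *		.size		= sizeof(attr),
 *		.type		= kprobe_pmu_type,
 *		.kprobe_func	= (__u64)(unsigned long)"some_kernel_function",
 *		.probe_offset	= 0,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 *
 * The PMU's event_init callback then reaches perf_kprobe_init() below.
 */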
#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */
#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!path)
		return -ENOMEM;
	ret = strncpy_from_user(
		path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
	if (ret == PATH_MAX) {
		ret = -E2BIG;
		goto out;
	}
	if (ret < 0)
		goto out;
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}
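/*
 * Context sketch (illustrative, abridged): perf_trace_init(),
 * perf_trace_destroy(), perf_trace_add() and perf_trace_del() above are wired
 * up by the perf core as the tracepoint PMU callbacks, roughly:
 *
 *	static struct pmu perf_tracepoint = {
 *		.event_init	= perf_tp_event_init,	// calls perf_trace_init()
 *		.add		= perf_trace_add,
 *		.del		= perf_trace_del,
 *		...
 *	};
 */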
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
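/*
 * Usage sketch (illustrative; variable names are placeholders): callers such
 * as the generated perf_trace_##call handlers and the kprobe/uprobe perf
 * paths pair this allocation with perf_trace_buf_submit():
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	// fill in the event-specific fields of *entry
 *	perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs, head, NULL);
 */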
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}
static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		return -EINVAL;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */