Linux 4.15.6
kernel/trace/trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;
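/*
 * One such buffer is allocated per recursion context (task, softirq,
 * hardirq, NMI) in perf_trace_event_reg() below. Assuming the usual
 * PERF_MAX_TRACE_SIZE of 2048 and 8-byte longs, perf_trace_t is in
 * effect "unsigned long[256]": 2048 bytes with unsigned long alignment.
 */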
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
static int perf_trace_event_perm(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        if (tp_event->perf_perm) {
                int ret = tp_event->perf_perm(tp_event, p_event);
                if (ret)
                        return ret;
        }

        /*
         * The parent was already checked and allowed when it was
         * created; allow children without re-checking.
         */
        if (p_event->parent)
                return 0;

        /*
         * It's OK to check the current process (owner) permissions here,
         * because the code below is called only via the perf_event_open()
         * syscall.
         */

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event)) {
                if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if (!is_sampling_event(p_event))
                        return 0;

                /*
                 * We don't allow user space callchains for function trace
                 * events, due to issues with page faults while tracing the
                 * page fault handler, and its overall tricky nature.
                 */
                if (!p_event->attr.exclude_callchain_user)
                        return -EINVAL;

                /*
                 * Same reason to disable user stack dump as for user space
                 * callchains above.
                 */
                if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
                        return -EINVAL;
        }

        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak;
         * only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}
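/*
 * Register @p_event with its event class and, on first use of any trace
 * event system-wide, allocate the shared percpu scratch buffers.
 * tp_event->perf_refcount counts users of this event id, while
 * total_ref_count counts event ids in use across the whole system.
 */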
static int perf_trace_event_reg(struct trace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

out:
        module_put(tp_event->mod);
}
static int perf_trace_event_open(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}
static void perf_trace_event_close(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
static int perf_trace_event_init(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}
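/*
 * Entry point from the perf core for tracepoint events: look up the
 * trace_event_call whose id matches attr.config and initialize the event.
 * On success, a reference to the module providing the event is held until
 * perf_trace_destroy().
 */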
int perf_trace_init(struct perf_event *p_event)
{
        struct trace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}
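/*
 * pmu::add() callback: invoked when the event is scheduled in on a CPU.
 * Without PERF_EF_START, the event is enqueued but left in the stopped
 * state.
 */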
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        /*
         * If TRACE_REG_PERF_ADD returns false, no custom action was performed
         * and we need to take the default action of enqueueing our event on
         * the right per-cpu hlist.
         */
        if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
                struct hlist_head __percpu *pcpu_list;
                struct hlist_head *list;

                pcpu_list = tp_event->perf_events;
                if (WARN_ON_ONCE(!pcpu_list))
                        return -EINVAL;

                list = this_cpu_ptr(pcpu_list);
                hlist_add_head_rcu(&p_event->hlist_entry, list);
        }

        return 0;
}
void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        /*
         * If TRACE_REG_PERF_DEL returns false, no custom action was performed
         * and we need to take the default action of dequeueing our event from
         * the right per-cpu hlist.
         */
        if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
                hlist_del_rcu(&p_event->hlist_entry);
}
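/*
 * Hand out the percpu scratch buffer for the current recursion context.
 * Callers fill the returned record and pass it to perf_trace_buf_submit();
 * see perf_ftrace_function_call() below for the usual alloc/fill/submit
 * sequence. The trailing memset zeroes the final u64 of the requested
 * size so that alignment padding never leaks stale data to user space.
 */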
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
        char *raw_data;
        int rctx;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return NULL;

        *rctxp = rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                return NULL;

        if (regs)
                *regs = this_cpu_ptr(&__perf_regs[rctx]);
        raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

        /* zero the dead bytes from alignment to avoid leaking stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
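/*
 * Fill in the common trace_entry header (pid, irq flags, preempt count)
 * of a record obtained from perf_trace_buf_alloc(), and set its type.
 */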
void perf_trace_buf_update(void *record, u16 type)
{
        struct trace_entry *entry = record;
        int pc = preempt_count();
        unsigned long flags;

        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct ftrace_entry *entry;
        struct perf_event *event;
        struct hlist_head head;
        struct pt_regs regs;
        int rctx;

        if ((unsigned long)ops->private != smp_processor_id())
                return;

        event = container_of(ops, struct perf_event, ftrace_ops);

        /*
         * @event->hlist_entry is NULL (per INIT_HLIST_NODE), and all
         * the perf code does is hlist_for_each_entry_rcu(), so we can
         * get away with simply setting the @head.first pointer in order
         * to create a singular list.
         */
        head.first = &event->hlist_entry;

        /*
         * A PERF_SAMPLE_RAW payload is preceded by a u32 size field, so
         * round the record up to where size header plus payload end on a
         * u64 boundary.
         */
#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        memset(&regs, 0, sizeof(regs));
        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
                              1, &regs, &head, NULL);

#undef ENTRY_SIZE
}
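/*
 * Worked example of the ENTRY_SIZE arithmetic above, assuming a 64-bit
 * build where struct ftrace_entry is 24 bytes (8-byte trace_entry header
 * plus ip and parent_ip): ALIGN(24 + 4, 8) - 4 = 28, so the u32 size
 * header that precedes a PERF_SAMPLE_RAW payload plus the 28-byte record
 * together end on a u64 boundary.
 */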
static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags = FTRACE_OPS_FL_RCU;
        ops->func = perf_ftrace_function_call;
        /* nr_cpu_ids is never a valid CPU, so the callback starts disarmed */
        ops->private = (void *)(unsigned long)nr_cpu_ids;

        return register_ftrace_function(ops);
}
static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);
        ftrace_free_filter(ops);
        return ret;
}
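/*
 * reg() callback for function trace events. Returning 1 from the ADD/DEL
 * cases tells perf_trace_add()/perf_trace_del() above that a custom action
 * was taken, so the default per-cpu hlist handling is skipped; the event
 * is armed instead by pointing ftrace_ops::private at the target CPU.
 */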
int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
{
        struct perf_event *event = data;

        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                break;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
                return 1;
        case TRACE_REG_PERF_DEL:
                event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
                return 1;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */