/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
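
/*
 * Decide whether this perf event may use the given trace event.
 * Counting-only events are harmless, but raw sample data
 * (PERF_SAMPLE_RAW) can leak kernel internals, so it is gated on
 * perf_paranoid_tracepoint_raw()/CAP_SYS_ADMIN unless the event is
 * marked TRACE_EVENT_FL_CAP_ANY and attached to the caller's own task.
 */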
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event) &&
	    perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
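
/*
 * Register a perf event with its trace event. The first user of a
 * given trace event allocates the per-cpu hlist heads, and the first
 * user system-wide also allocates the shared per-context scratch
 * buffers; later users only bump tp_event->perf_refcount.
 */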
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
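
/*
 * Undo perf_trace_event_reg(): on the last reference, unregister the
 * perf callback, wait out in-flight probes with
 * tracepoint_synchronize_unregister(), then free the per-cpu lists
 * and, once no trace event is used by perf at all, the shared buffers.
 */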
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
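
/* Forward perf open/close to the trace event class's ->reg() hook. */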
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
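
/*
 * Complete setup of one perf event: permission check, refcounted
 * registration, then the per-event open; a failed open rolls the
 * registration back.
 */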
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
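
/*
 * Entry point for PERF_TYPE_TRACEPOINT events: attr.config carries the
 * trace event id (the number exported in the event's "id" file in the
 * tracing directory). Look the event up under event_mutex and pin its
 * module before initializing it.
 */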
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
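
/* Tear down one perf event: close it, then drop its registration. */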
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
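
/*
 * Scheduling hooks: perf_trace_add()/perf_trace_del() run when the
 * event is scheduled in or out on a CPU, and link the event into the
 * per-cpu hlist that the tracepoint probes traverse under RCU.
 */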
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;

	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
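
/*
 * Claim the per-cpu scratch buffer for the current recursion context
 * and fill in the generic trace entry header. Returns NULL if the
 * recursion depth is exhausted or the requested size is too large.
 *
 * A typical caller looks roughly like this (a sketch modeled on the
 * probes generated by the trace event macros; the field name is
 * illustrative):
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->field = ...;
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      regs, head, NULL);
 */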
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
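
/*
 * ftrace callback for perf function-trace events: emit one TRACE_FN
 * entry (ip + parent_ip) per traced function. ENTRY_SIZE rounds the
 * record so that, together with the u32 size header perf prepends to
 * raw samples, the payload stays u64-aligned.
 */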
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);

	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}
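
/*
 * ->reg() implementation backing the function-trace pseudo event: map
 * the generic trace_reg operations onto ftrace_ops registration and
 * the per-cpu enable/disable of a control ops.
 */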
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */