/* kernel/trace/trace_syscalls.c */
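/*
 * Generic syscall tracing support: this file turns the sys_enter and
 * sys_exit tracepoints into per-syscall ftrace events (the "syscalls"
 * event system) and, when CONFIG_PERF_EVENTS is set, into perf events.
 * Per-syscall metadata is emitted by the SYSCALL_DEFINEx() macros and
 * collected in the __syscalls_metadata section.
 */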
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"
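
/*
 * syscall_trace_lock serializes (un)registration of the shared
 * sys_enter/sys_exit tracepoint probes.  The refcounts track how many
 * individual syscall events are currently enabled, and the bitmaps
 * record which syscall numbers the probes should actually trace.
 */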
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
                                  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
                                 enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        return &entry->enter_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
        .trace          = print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
        .trace          = print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
        .system         = "syscalls",
        .reg            = syscall_enter_register,
        .define_fields  = syscall_enter_define_fields,
        .get_fields     = syscall_get_enter_fields,
        .raw_init       = init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
        .system         = "syscalls",
        .reg            = syscall_exit_register,
        .define_fields  = syscall_exit_define_fields,
        .fields         = LIST_HEAD_INIT(event_class_syscall_exit.fields),
        .raw_init       = init_syscall_trace,
};

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
        /*
         * Only compare after the "sys" prefix. Archs that use
         * syscall wrappers may have syscall symbol aliases prefixed
         * with "SyS" instead of "sys", leading to an unwanted
         * mismatch.
         */
        return !strcmp(sym + 3, name + 3);
}
#endif
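
/*
 * Look up the syscall metadata entry for a syscall by the address of
 * its handler: resolve the address to a symbol name with kallsyms and
 * scan the metadata section emitted by the SYSCALL_DEFINEx() macros
 * for a matching name.
 */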
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata **start;
        struct syscall_metadata **stop;
        char str[KSYM_SYMBOL_LEN];

        start = __start_syscalls_metadata;
        stop = __stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
                return NULL;

        for ( ; start < stop; start++) {
                if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
                        return *start;
        }

        return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
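
/*
 * SYSCALL_FIELD() expands to the (type, name, offset, size, signedness)
 * argument list that trace_define_field() expects.  __bad_type_size()
 * is declared but never defined: if the declared type does not match
 * the size of the field in the trace entry, the call to it survives
 * constant folding and the build fails at link time.
 */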
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)                                      \
        sizeof(type) != sizeof(trace.name) ?                           \
                __bad_type_size() :                                    \
                #type, #name, offsetof(typeof(trace), name),           \
                sizeof(trace.name), is_signed_type(type)
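
/*
 * Build the print_fmt string for a syscall entry event: a quoted
 * format with one "arg: 0x%0<N>lx" conversion per argument, followed
 * by the matching "((unsigned long)(REC->arg))" accessors.  It is
 * called twice: first with len == 0 to compute the required length,
 * then with an allocated buffer to actually write the string.
 */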
static
int  __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
        int i;
        int pos = 0;

        /* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
                                entry->args[i], sizeof(unsigned long),
                                i == entry->nb_args - 1 ? "" : ", ");
        }
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO,
                                ", ((unsigned long)(REC->%s))", entry->args[i]);
        }

#undef LEN_OR_ZERO

        /* return the length of print_fmt */
        return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
        char *print_fmt;
        int len;
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event != call) {
                call->print_fmt = "\"0x%lx\", REC->ret";
                return 0;
        }

        /* First: called with 0 length to calculate the needed length */
        len = __set_enter_print_fmt(entry, NULL, 0);

        print_fmt = kmalloc(len + 1, GFP_KERNEL);
        if (!print_fmt)
                return -ENOMEM;

        /* Second: actually write the @print_fmt */
        __set_enter_print_fmt(entry, print_fmt, len + 1);
        call->print_fmt = print_fmt;

        return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event == call)
                kfree(call->print_fmt);
}

static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int ret;
        int i;
        int offset = offsetof(typeof(trace), args);

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}
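
/*
 * Probe attached to the sys_enter tracepoint.  If tracing is enabled
 * for this syscall number, reserve space in the ring buffer, record
 * the syscall number and its arguments, and commit the event unless
 * the event filter discards it.
 */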
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->enter_event->event.type, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
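
/*
 * Enable ftrace tracing for one syscall's entry event.  The sys_enter
 * tracepoint probe is shared by all syscalls, so it is registered only
 * when the first event is enabled; afterwards the per-syscall bit in
 * enabled_enter_syscalls is what turns tracing on for this syscall.
 */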
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
        if (!ret) {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
        if (!ret) {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}
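
/*
 * Per-event raw_init callback: reject syscalls whose metadata could not
 * be mapped, build the event's print_fmt, and register the event with
 * the trace event core.
 */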
int init_syscall_trace(struct ftrace_event_call *call)
{
        int id;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls) {
                pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
                                ((struct syscall_metadata *)call->data)->name);
                return -ENOSYS;
        }

        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;

        id = trace_event_raw_init(call);

        if (id < 0) {
                free_syscall_print_fmt(call);
                return id;
        }

        return id;
}
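
/*
 * Default, weak implementation: the handler address for syscall @nr is
 * simply the corresponding sys_call_table entry.  Architectures with a
 * different table layout can override this.
 */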
unsigned long __init __weak arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr];
}

int __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                        NR_syscalls, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;
                syscalls_metadata[i] = meta;
        }

        return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
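
/*
 * sys_enter probe used for perf.  The record layout matches the ftrace
 * event, but the buffer is sized for the perf output path: the u32
 * size field is accounted for and the total is rounded up to a u64
 * boundary before the sample is submitted.
 */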
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = syscall_get_nr(current, regs);
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return;

        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                        sys_data->enter_event->event.type, regs, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);

        head = this_cpu_ptr(sys_data->enter_event->perf_events);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_enter)
                ret = register_trace_sys_enter(perf_syscall_enter, NULL);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point");
        } else {
                set_bit(num, enabled_perf_enter_syscalls);
                sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_enter--;
        clear_bit(num, enabled_perf_enter_syscalls);
        if (!sys_perf_refcount_enter)
                unregister_trace_sys_enter(perf_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = syscall_get_nr(current, regs);
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "exit event has grown above perf buffer size"))
                return;

        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                        sys_data->exit_event->event.type, regs, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        head = this_cpu_ptr(sys_data->exit_event->perf_events);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_exit)
                ret = register_trace_sys_exit(perf_syscall_exit, NULL);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point");
        } else {
                set_bit(num, enabled_perf_exit_syscalls);
                sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_exit--;
        clear_bit(num, enabled_perf_exit_syscalls);
        if (!sys_perf_refcount_exit)
                unregister_trace_sys_exit(perf_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
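
/*
 * .reg callbacks for the "syscalls" event classes: dispatch on the
 * requested registration type to the ftrace or perf enable/disable
 * helpers above.
 */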
static int syscall_enter_register(struct ftrace_event_call *event,
                                  enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_enter(event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_enter(event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysenter_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysenter_disable(event);
                return 0;
#endif
        }
        return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
                                 enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_exit(event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_exit(event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysexit_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysexit_disable(event);
                return 0;
#endif
        }
        return 0;
}