#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				  enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

static struct list_head *
syscall_get_exit_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->exit_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.get_fields	= syscall_get_exit_fields,
	.raw_init	= init_syscall_trace,
};

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

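/*
 * Scan the syscalls metadata section for the entry whose symbol name
 * matches the handler at @syscall. kallsyms gives us the symbol name;
 * the comparison skips the "sys"/"SyS" prefix (see the comment in the
 * loop body).
 */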
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscalls symbols aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}

	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

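/*
 * Render one syscall-entry event from the ring buffer as text. With the
 * default flags each argument is printed as "name: hexvalue"; an
 * illustrative output line (example values only) would be:
 *
 *	sys_read(fd: 3, buf: 7fff8a40, count: 2000)
 *
 * TRACE_ITER_VERBOSE additionally prints each argument's type.
 */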
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

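/*
 * Render one syscall-exit event, e.g. "sys_read -> 0x10" (illustrative).
 * If no metadata is known for this syscall number we emit a bare newline
 * rather than failing the line.
 */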
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

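/*
 * __bad_type_size() is declared but never defined: if the declared type of
 * a SYSCALL_FIELD() does not match the size of the struct member, the call
 * below survives constant folding and the build fails at link time instead
 * of silently defining a mis-sized field.
 */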
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

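/*
 * Build the print_fmt string for a syscall-entry event using the usual
 * two-pass pattern: a first call with len == 0 only measures, a second
 * call with a real buffer writes. For a hypothetical two-argument syscall
 * on a 64-bit arch the result would look like (illustrative only):
 *
 *	"fd: 0x%08lx, count: 0x%08lx", ((unsigned long)(REC->fd)),
 *	((unsigned long)(REC->count))
 */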
static int __set_enter_print_fmt(struct syscall_metadata *entry,
				 char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

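/*
 * Exit events share one static format string ("0x%lx", REC->ret); only
 * enter events need a per-syscall print_fmt allocated here.
 */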
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

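/*
 * Probe attached to the generic sys_enter tracepoint. It bails out early
 * unless tracing is enabled for this syscall nr, then records the nr and
 * the raw arguments into the ring buffer.
 */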
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

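/*
 * Probe attached to the generic sys_exit tracepoint; records the syscall
 * nr and its return value.
 */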
void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

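/*
 * All syscalls share the two tracepoint probes above, so registration is
 * refcounted: the first enabled syscall event registers the probe, the
 * last disabled one unregisters it, and the per-syscall bitmaps decide
 * which events are actually recorded.
 */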
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

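/*
 * Common raw_init callback for both event classes: build the print_fmt,
 * then register the trace event type; on failure the format string is
 * freed again.
 */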
int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

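/*
 * Early init: map every syscall nr to its metadata entry by resolving the
 * sys_call_table slot to a symbol and matching it against the metadata
 * section. Slots without metadata are simply left NULL.
 */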
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

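/*
 * perf mirrors the ftrace path with its own bitmaps and refcounts: the
 * record is staged in the per-cpu perf trace buffer and submitted to the
 * hlist of perf events attached to this tracepoint.
 */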
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

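/*
 * perf counterpart of ftrace_syscall_exit: a fixed-size record holding
 * the syscall nr and the return value.
 */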
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

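/*
 * Class ->reg() callbacks: dispatch TRACE_REG_* requests from the event
 * core to the ftrace or perf enable/disable helpers above.
 */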
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
#endif
	}
	return 0;
}