#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

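/*
 * For illustration (hypothetical symbol, not taken from this file): if
 * kallsyms reports a wrapper alias such as "SyS_read" for the metadata
 * name "sys_read", both strings reduce to "_read" once the three-character
 * prefix is skipped, so the comparison above still matches.
 */
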
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel, do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 * In such a case, instead of reporting the wrong syscalls,
 * just ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

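/*
 * Illustrative example of why this matters: on x86_64 a 32-bit task that
 * enters through the compat path uses the 32-bit numbering (open is 5
 * there but 2 in the 64-bit table), so reporting the raw number would
 * attribute the event to the wrong syscall.  The -1 returned above makes
 * the tracing callbacks below bail out before recording anything.
 */
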
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall, ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

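/*
 * For illustration (made-up values), an enter line produced by the code
 * above looks roughly like
 *
 *	sys_read(fd: 3, buf: 7ffd30c2f000, count: 1000)
 *
 * and with the "verbose" trace option each value is preceded by its
 * declared type, e.g. "unsigned int fd: 3".
 */
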
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

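/*
 * For illustration: SYSCALL_FIELD(int, nr) supplies the type string (or a
 * reference to the undefined __bad_type_size() helper, so that a size
 * mismatch fails the build), followed by the field name "nr", its offset
 * in the on-stack trace struct, its size, and its signedness: exactly the
 * argument list that trace_define_field() expects below.
 */
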
static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

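/*
 * For illustration (hypothetical two-argument syscall with args "fd" and
 * "count" on a 64-bit build, where sizeof(unsigned long) is 8), the
 * generated print_fmt would be roughly:
 *
 *	"fd: 0x%08lx, count: 0x%08lx", ((unsigned long)(REC->fd)),
 *	((unsigned long)(REC->count))
 */
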
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}

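/*
 * Note on the size calculation above: struct syscall_trace_enter ends in a
 * variable-length args[] array, so the reserved record is the fixed header
 * plus one unsigned long per argument; for example, a three-argument
 * syscall on a 64-bit kernel reserves sizeof(*entry) + 3 * 8 bytes.
 */
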
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}

static int reg_event_syscall_enter(struct ftrace_event_file *file,
				   struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		set_bit(num, tr->enabled_enter_syscalls);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_file *file,
				      struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	clear_bit(num, tr->enabled_enter_syscalls);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_file *file,
				  struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		set_bit(num, tr->enabled_exit_syscalls);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_file *file,
				     struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	clear_bit(num, tr->enabled_exit_syscalls);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0)
		free_syscall_print_fmt(call);

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

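/*
 * Worked example of the alignment above (hypothetical numbers): if the raw
 * record size came to 72 bytes, ALIGN(72 + 4, 8) - 4 = 76, so the record
 * plus the u32 size field that perf places in front of it ends on a u64
 * boundary.
 */
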
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}