#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
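
/*
 * Format a sys_enter record for human-readable trace output,
 * e.g. "open(filename: 7fff..., flags: 0, mode: 1)".  With the
 * "verbose" trace option set, each argument is also prefixed
 * with its C type.
 */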
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
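
/*
 * Format a sys_exit record, e.g. "open -> 0x3".  An unknown
 * syscall number degrades to an empty line rather than failing
 * the whole trace output.
 */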
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
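
/*
 * SYSCALL_FIELD expands to the (type-name, field-name, offset, size)
 * arguments that the format files below print.  The sizeof comparison
 * doubles as a compile-time type check: if the declared type does not
 * match the struct field, the reference to the never-defined
 * __bad_type_size() fails the build at link time.
 */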
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name)
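
/*
 * Emit the "format" file for a syscall entry event: one field line
 * per argument plus the leading "nr" field, followed by a print fmt
 * string that userspace parsers can use.  Every syscall argument is
 * exported as an unsigned long.
 */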
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int nr;
	int ret;
	struct syscall_metadata *entry;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	nr = syscall_name_to_nr(call->data);
	entry = syscall_nr_to_meta(nr);

	if (!entry)
		return 0;

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset,
				       sizeof(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}
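
/*
 * Illustrative output for a single-argument syscall such as sys_close
 * (the offsets shown are examples only; the real values depend on
 * sizeof(struct trace_entry) on the running kernel):
 *
 *	field:int nr;	offset:12;	size:4;
 *	field:unsigned int fd;	offset:16;	size:8;
 *
 *	print fmt: "fd: 0x%08lx", ((unsigned long)(REC->fd))
 */

/*
 * The exit event's format file is fixed: just the syscall number
 * and the return value.
 */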
int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta;
	int ret;
	int nr;
	int i;
	int offset = offsetof(typeof(trace), args);

	nr = syscall_name_to_nr(call->data);
	meta = syscall_nr_to_meta(nr);

	if (!meta)
		return 0;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}
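
/* The exit event defines only the syscall number and return value. */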
int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
				 FILTER_OTHER);

	return ret;
}
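
/*
 * Tracepoint probe for sys_enter: record the syscall number and its
 * arguments into the ftrace ring buffer.  The record is variable
 * sized, since different syscalls take different numbers of
 * arguments.
 */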
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
						  size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
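
/*
 * Tracepoint probe for sys_exit: same flow as above, but the record
 * is fixed size and carries the return value instead of arguments.
 */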
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
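
/*
 * The reg/unreg callbacks below are shared by every syscall event.
 * A single pair of tracepoint probes serves all syscalls: the
 * refcounts track how many events are live, and the per-syscall
 * bitmaps gate which syscalls each probe actually records.
 */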
int reg_event_syscall_enter(void *ptr)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(void *ptr)
{
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(void *ptr)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(void *ptr)
{
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
	.trace			= print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace			= print_syscall_exit,
};
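
/*
 * Everything below feeds syscall events to perf ("event profiling")
 * rather than to the ftrace ring buffer.  Records are staged in
 * per-cpu scratch buffers and handed to perf_tp_event().
 */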
#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;
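
/*
 * The record is built in a per-cpu scratch buffer rather than on the
 * stack, presumably because its size varies per syscall and the
 * syscall fast path should avoid a large stack footprint.  NMI
 * context gets its own buffer so an NMI arriving mid-write cannot
 * corrupt a half-built record.
 */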
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *raw_data;
	int syscall_nr;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end:
	local_irq_restore(flags);
}
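
/*
 * Same registration scheme as the ftrace variants above, with
 * separate refcounts and bitmaps so ftrace and perf can enable
 * syscall tracing independently.
 */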
int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}
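
/*
 * Exit-side perf probe: a fixed-size record carrying the syscall
 * number and return value, staged and submitted the same way as in
 * prof_syscall_enter().
 */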
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *raw_data;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);
	cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end:
	local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif	/* CONFIG_EVENT_PROFILE */