#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

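/*
 * The two printers below turn raw ring buffer entries back into human
 * readable "name(arg: value, ...)" and "name -> retval" lines for the
 * trace output.  syscall_nr_to_meta() maps a syscall number to the
 * metadata (name, argument names and types) generated at build time.
 */
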
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

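/*
 * __bad_type_size() is declared but intentionally never defined: the
 * sizeof() comparison in SYSCALL_FIELD() is a compile-time constant, so
 * when the declared field type matches the struct layout the call is
 * optimized away entirely.  On a mismatch the call survives to link
 * time and the unresolved symbol breaks the build.
 */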
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

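/*
 * Emit the "format" file contents for a syscall event: one field
 * description per argument plus a print fmt line, which userspace
 * tools such as perf parse in order to decode raw samples.
 */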
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int nr;
	int ret;
	struct syscall_metadata *entry;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	nr = syscall_name_to_nr(call->data);
	entry = syscall_nr_to_meta(nr);

	if (!entry)
		return 0;

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta;
	int ret;
	int nr;
	int i;
	int offset = offsetof(typeof(trace), args);

	nr = syscall_name_to_nr(call->data);
	meta = syscall_nr_to_meta(nr);

	if (!meta)
		return 0;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

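/*
 * Tracepoint probes for the ftrace ring buffer path.  A single probe
 * is shared by every syscall; the enabled_*_syscalls bitmaps decide,
 * per syscall number, whether an event is actually recorded.
 */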
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
						  size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

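/*
 * Registration is refcounted: only the first enabled syscall event
 * attaches the shared probe to the sys_enter/sys_exit tracepoint, and
 * only the last disabled one detaches it.  Everything in between just
 * flips the per-syscall bit under syscall_trace_lock.
 */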
int reg_event_syscall_enter(void *ptr)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(void *ptr)
{
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(void *ptr)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(void *ptr)
{
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
	.trace			= print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace			= print_syscall_exit,
};

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

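/*
 * Profile (perf) path.  Records are built in per-cpu scratch buffers
 * (trace_profile_buf, with a separate trace_profile_buf_nmi for NMI
 * context) that are presumably allocated elsewhere by the event
 * profiling core, then handed to perf_tp_event().  Disabling irqs
 * doubles as the RCU read side protecting those buffers.
 */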
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *raw_data;
	int syscall_nr;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end:
	local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *raw_data;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);
	cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end:
	local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_EVENT_PROFILE */