// SPDX-License-Identifier: GPL-2.0
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <linux/xarray.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"
static DEFINE_MUTEX(syscall_trace_lock);
static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
static DEFINE_XARRAY(syscalls_metadata_sparse);
static struct syscall_metadata **syscalls_metadata;
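
/*
 * Syscall metadata is normally kept in a flat array indexed by syscall
 * number. Architectures with sparse syscall numbers
 * (CONFIG_HAVE_SPARSE_SYSCALL_NR) use the xarray instead, so that a huge,
 * mostly empty array does not have to be allocated.
 */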
#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32bit applications to run on a
 * 64bit kernel do not map the syscalls for the 32bit tasks the
 * same as they do for 64bit tasks.
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
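
/*
 * Scan the compiler-generated __syscalls_metadata section for the entry
 * whose name matches the symbol at @syscall (resolved via kallsyms).
 * Unimplemented slots (sys_ni_syscall) and addresses with no matching
 * metadata yield NULL.
 */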
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}
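
/* Map a syscall number to its metadata; NULL if out of range or unset. */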
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR))
		return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);

	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
const char *get_syscall_name(int syscall)
{
	struct syscall_metadata *entry;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		return NULL;

	return entry->name;
}
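
/*
 * Format a sys_enter event for the trace output, e.g. (illustrative):
 *
 *	openat(dfd: ffffff9c, filename: 7f0a3bc0, flags: 0, mode: 0)
 *
 * With the "verbose" trace option set, each value is preceded by its
 * C type name.
 */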
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

out:
	return trace_handle_return(s);
}
#define SYSCALL_FIELD(_type, _name) {					\
	.type = #_type, .name = #_name,					\
	.size = sizeof(_type), .align = __alignof__(_type),		\
	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }
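
/*
 * For reference, SYSCALL_FIELD(int, __syscall_nr) expands to:
 *
 *	{ .type = "int", .name = "__syscall_nr",
 *	  .size = sizeof(int), .align = __alignof__(int),
 *	  .is_signed = is_signed_type(int), .filter_type = FILTER_OTHER }
 */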
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
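
/*
 * For a two-argument syscall on a 64-bit arch, the generated print_fmt
 * looks like this (argument names are illustrative):
 *
 *	"dfd: 0x%08lx, filename: 0x%08lx",
 *	((unsigned long)(REC->dfd)), ((unsigned long)(REC->filename))
 */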
static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}
static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}
static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int offset = offsetof(typeof(trace), args);
	int ret = 0;
	int i;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		if (ret)
			break;
		offset += sizeof(unsigned long);
	}

	return ret;
}
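
/*
 * Tracepoint probe for sys_enter: look up the per-trace-array event file
 * for this syscall, reserve a ring buffer entry sized for the syscall's
 * argument count, and record the syscall number plus its arguments.
 */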
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct trace_event_buffer fbuffer;
	unsigned long args[6];
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	entry = ring_buffer_event_data(fbuffer.event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, args);
	memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);

	trace_event_buffer_commit(&fbuffer);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct trace_event_buffer fbuffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;

	entry = ring_buffer_event_data(fbuffer.event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	trace_event_buffer_commit(&fbuffer);
}
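
/*
 * The sys_enter/sys_exit tracepoints are registered once per trace array;
 * sys_refcount_enter/exit count how many syscall events are enabled so
 * the tracepoint is only registered for the first user and unregistered
 * after the last. Individual syscalls are switched on and off through
 * the RCU-protected enter/exit_syscall_files arrays.
 */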
static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}
static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}
static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0)
		free_syscall_print_fmt(call);

	return id;
}
static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
	SYSCALL_FIELD(int, __syscall_nr),
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = syscall_enter_define_fields },
	{}
};
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};
struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.fields_array	= syscall_enter_fields_array,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};
struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.fields_array	= (struct trace_event_fields[]){
		SYSCALL_FIELD(int, __syscall_nr),
		SYSCALL_FIELD(long, ret),
		{}
	},
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};
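
/*
 * Default (weak) lookup of a syscall's entry address; assumes a dense
 * sys_call_table. Architectures with a different layout override this.
 */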
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;
	void *ret;

	if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
		syscalls_metadata = kcalloc(NR_syscalls,
					sizeof(*syscalls_metadata),
					GFP_KERNEL);
		if (!syscalls_metadata) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;

		if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
			syscalls_metadata[i] = meta;
		} else {
			ret = xa_store(&syscalls_metadata_sparse, i, meta,
					GFP_KERNEL);
			WARN(xa_is_err(ret),
				"Syscall memory allocation failed\n");
		}
	}
}
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
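
/*
 * Build the context a BPF program attached to this perf event sees. The
 * layout (trace_entry, syscall nr, args) must mirror the sys_enter
 * tracepoint format; the first pointer-sized slot is overwritten with
 * 'regs' because BPF expects pt_regs first in the ctx.
 */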
static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
			       struct syscall_metadata *sys_data,
			       struct syscall_trace_enter *rec)
{
	struct syscall_tp_t {
		struct trace_entry ent;
		int syscall_nr;
		unsigned long args[SYSCALL_DEFINE_MAXARGS];
	} __aligned(8) param;
	int i;

	BUILD_BUG_ON(sizeof(param.ent) < sizeof(void *));

	/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
	perf_fetch_caller_regs(regs);
	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	for (i = 0; i < sys_data->nb_args; i++)
		param.args[i] = rec->args[i];
	return trace_call_bpf(call, &param);
}
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct pt_regs *fake_regs;
	struct hlist_head *head;
	unsigned long args[6];
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, args);
	memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);

	if ((valid_prog_array &&
	     !perf_call_bpf_enter(sys_data->enter_event, fake_regs, sys_data, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL);
}
static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}
static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
			      struct syscall_trace_exit *rec)
{
	struct syscall_tp_t {
		struct trace_entry ent;
		int syscall_nr;
		unsigned long ret;
	} __aligned(8) param;

	/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
	perf_fetch_caller_regs(regs);
	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	param.ret = rec->ret;
	return trace_call_bpf(call, &param);
}
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct pt_regs *fake_regs;
	struct hlist_head *head;
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	if ((valid_prog_array &&
	     !perf_call_bpf_exit(sys_data->exit_event, fake_regs, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
			      1, regs, head, NULL);
}
static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
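
/*
 * ->reg() callback for the syscall event classes: dispatches registration
 * requests from ftrace and perf to the helpers above.
 */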
static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}