/*
 * trace_events_filter - generic event filtering
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
 */
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "trace.h"
#include "trace_output.h"
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
};
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
};
struct opstack_op {
	int op;
	struct list_head list;
};

struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};
struct filter_parse_state {
	struct filter_op *ops;
	struct list_head opstack;
	struct list_head postfix;
	int lasterr;
	int lasterr_pos;

	struct {
		char *string;
		unsigned int cnt;
		unsigned int tail;
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];
		unsigned int tail;
	} operand;
};
struct pred_stack {
	struct filter_pred	**preds;
	int			index;
};
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT: match = (*addr < val); break;			\
	case OP_LE: match = (*addr <= val); break;			\
	case OP_GT: match = (*addr > val); break;			\
	case OP_GE: match = (*addr >= val); break;			\
	default: break;							\
	}								\
									\
	return match;							\
}
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
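/*
 * For illustration: DEFINE_COMPARISON_PRED(s64) above expands to a
 * filter_pred_s64() that switches on OP_LT/OP_LE/OP_GT/OP_GE, while the
 * equality predicates fold '!=' into '==' via the XOR with pred->not.
 * E.g. for a 64-bit field compared with "!=", init_pred() below picks
 * filter_pred_64() and sets pred->not, so an equal value produces
 * (1 ^ 1) == 0: no match.
 */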
/* Filter predicate for fixed sized arrays of characters */
static int filter_pred_string(struct filter_pred *pred, void *event)
{
	char *addr = (char *)(event + pred->offset);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);

	match = cmp ^ pred->not;

	return match;
}
/* Filter predicate for char * pointers */
static int filter_pred_pchar(struct filter_pred *pred, void *event)
{
	char **addr = (char **)(event + pred->offset);
	int cmp, match;
	int len = strlen(*addr) + 1;	/* including trailing '\0' */

	cmp = pred->regex.match(*addr, &pred->regex, len);

	match = cmp ^ pred->not;

	return match;
}
/*
 * Filter predicate for dynamic sized arrays of characters.
 * These are implemented through a list of strings at the end
 * of the entry.
 * Each of these strings has a field in the entry which
 * contains its offset from the beginning of the entry.
 * We first read that field, then add the offset it holds to the
 * address of the entry; that gives us the address of the string.
 */
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
	u32 str_item = *(u32 *)(event + pred->offset);
	int str_loc = str_item & 0xffff;
	int str_len = str_item >> 16;
	char *addr = (char *)(event + str_loc);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, str_len);

	match = cmp ^ pred->not;

	return match;
}
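/*
 * Worked example of the encoding above: if the u32 read from the field
 * is 0x0005002c, the string lives 0x2c bytes into the entry and is 5
 * bytes long, so the matcher is handed (event + 0x2c, 5).
 */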
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
/*
 * regex_match_foo - Basic regex callbacks
 *
 * @str: the string to be searched
 * @r:   the regex structure containing the pattern string
 * @len: the length of the string to be searched (including '\0')
 *
 * Note:
 * - @str might not be NULL-terminated if it's of type DYN_STRING
 *   or STATIC_STRING
 */

static int regex_match_full(char *str, struct regex *r, int len)
{
	if (strncmp(str, r->pattern, len) == 0)
		return 1;
	return 0;
}

static int regex_match_front(char *str, struct regex *r, int len)
{
	if (strncmp(str, r->pattern, r->len) == 0)
		return 1;
	return 0;
}

static int regex_match_middle(char *str, struct regex *r, int len)
{
	if (strnstr(str, r->pattern, len))
		return 1;
	return 0;
}

static int regex_match_end(char *str, struct regex *r, int len)
{
	int strlen = len - 1;

	if (strlen >= r->len &&
	    memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
		return 1;
	return 0;
}
/**
 * filter_parse_regex - parse a basic regex
 * @buff:   the raw regex
 * @len:    length of the regex
 * @search: will point to the beginning of the string to compare
 * @not:    tell whether the match will have to be inverted
 *
 * The caller passes in a buffer containing a regex; this function sets
 * *@search to point at the search part of the buffer and returns the
 * type of search it is (see enum above).  Note that this modifies @buff.
 *
 * Returns the enum type.
 *  *@search returns the pointer to use for comparison.
 *  *@not returns 1 if @buff started with a '!', 0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
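/*
 * Examples: "sched*" gives MATCH_FRONT_ONLY with *search == "sched",
 * "*switch" gives MATCH_END_ONLY with *search == "switch", "*dle*"
 * gives MATCH_MIDDLE_ONLY with *search == "dle", and a plain "idle"
 * stays MATCH_FULL.  "!idle" additionally sets *not, inverting the match.
 */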
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	pred->not ^= not;
}
enum move_type {
	MOVE_DOWN,
	MOVE_UP_FROM_LEFT,
	MOVE_UP_FROM_RIGHT
};

static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
enum walk_return {
	WALK_PRED_ABORT,
	WALK_PRED_PARENT,
	WALK_PRED_DEFAULT,
};

typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);

static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	return 0;
}
/*
 * When a series of ANDs or ORs is found together, instead of
 * climbing up and down the tree branches, an array of the ops
 * is made in the order of the checks. We can then just move
 * across the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 *  if ((match && op->op == OP_OR) ||
	 *      (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			return match;
	}
	return match;
}
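/*
 * Worked example: for "a == 1 || b == 1 || c == 1", fold_pred() below
 * gives the top OR an ops[] array holding the three leaves, and type
 * is 1.  If the 'a' predicate matches, !!match == type on the first
 * pass and we return without ever evaluating 'b' or 'c'.
 */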
struct filter_match_preds_data {
	struct filter_pred *preds;
	int match;
	void *rec;
};

static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protected with
	 * preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
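/*
 * A sketch of the intended fast-path use; the caller (not this file)
 * builds the record and keeps preemption disabled around the call:
 *
 *	preempt_disable();
 *	if (!filter_match_preds(call->filter, entry))
 *		discard_the_event();
 *	preempt_enable();
 *
 * Here "entry" and "discard_the_event()" are placeholders for the
 * ring-buffer record and the caller's discard path.
 */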
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
static void remove_filter_string(struct event_filter *filter)
{
	if (!filter)
		return;

	kfree(filter->filter_string);
	filter->filter_string = NULL;
}
static int replace_filter_string(struct event_filter *filter,
				 char *filter_string)
{
	kfree(filter->filter_string);
	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
	if (!filter->filter_string)
		return -ENOMEM;

	return 0;
}
static int append_filter_string(struct event_filter *filter,
				char *string)
{
	int newlen;
	char *new_filter_string;

	BUG_ON(!filter->filter_string);
	newlen = strlen(filter->filter_string) + strlen(string) + 1;
	new_filter_string = kmalloc(newlen, GFP_KERNEL);
	if (!new_filter_string)
		return -ENOMEM;

	strcpy(new_filter_string, filter->filter_string);
	strcat(new_filter_string, string);
	kfree(filter->filter_string);
	filter->filter_string = new_filter_string;

	return 0;
}
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);

	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = call->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_printf(s, "none\n");
	mutex_unlock(&event_mutex);
}
void print_subsystem_event_filter(struct event_subsystem *system,
				  struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = system->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
	mutex_unlock(&event_mutex);
}
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
	return 0;
}

static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}

static int __push_pred_stack(struct pred_stack *stack,
			     struct filter_pred *pred)
{
	int index = stack->index;

	if (WARN_ON(index == 0))
		return -ENOSPC;

	stack->preds[--index] = pred;
	stack->index = index;
	return 0;
}

static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leaves allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
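/*
 * Sketch of how replace_preds() below drives this: for the postfix
 * stream  a==1  b==2  &&  the two leaf preds are pushed first; the
 * AND then pops both, wires up the left/right/parent indexes, and
 * pushes itself back as the root of that subtree.
 */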
static void __free_preds(struct event_filter *filter)
{
	if (filter->preds) {
		kfree(filter->preds);
		filter->preds = NULL;
	}
	filter->a_preds = 0;
	filter->n_preds = 0;
}
static void filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}

/*
 * Called when destroying the ftrace_event_call.
 * The call is being freed, so we do not need to worry about
 * the call being currently used. This is for module code removing
 * the tracepoints from within it.
 */
void destroy_preds(struct ftrace_event_call *call)
{
	__free_filter(call->filter);
	call->filter = NULL;
}
static struct event_filter *__alloc_filter(void)
{
	struct event_filter *filter;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	return filter;
}
static int __alloc_preds(struct event_filter *filter, int n_preds)
{
	struct filter_pred *pred;
	int i;

	if (filter->preds)
		__free_preds(filter);

	filter->preds =
		kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);

	if (!filter->preds)
		return -ENOMEM;

	filter->a_preds = n_preds;
	filter->n_preds = 0;

	for (i = 0; i < n_preds; i++) {
		pred = &filter->preds[i];
		pred->fn = filter_pred_none;
	}

	return 0;
}
static void filter_free_subsystem_preds(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;

		filter_disable(call);
		remove_filter_string(call->filter);
	}
}
static void filter_free_subsystem_filters(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;
		__free_filter(call->filter);
		call->filter = NULL;
	}
}
static int filter_add_pred(struct filter_parse_state *ps,
			   struct event_filter *filter,
			   struct filter_pred *pred,
			   struct pred_stack *stack)
{
	int err;

	if (WARN_ON(filter->n_preds == filter->a_preds)) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	err = filter_set_pred(filter, filter->n_preds, stack, pred);
	if (err)
		return err;

	filter->n_preds++;

	return 0;
}
int filter_assign_type(const char *type)
{
	if (strstr(type, "__data_loc") && strstr(type, "char"))
		return FILTER_DYN_STRING;

	if (strchr(type, '[') && strstr(type, "char"))
		return FILTER_STATIC_STRING;

	return FILTER_OTHER;
}
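/*
 * Example mappings: a field declared "__data_loc char[] name" gets
 * FILTER_DYN_STRING, "char comm[16]" gets FILTER_STATIC_STRING, and
 * a plain "int pid" falls through to FILTER_OTHER.
 */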
static bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}
static int is_legal_op(struct ftrace_event_field *field, int op)
{
	if (is_string_field(field) &&
	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
		return 0;
	if (!is_string_field(field) && op == OP_GLOB)
		return 0;

	return 1;
}
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)
{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else {
		if (field->is_signed)
			ret = strict_strtoll(pred->regex.pattern, 0, &val);
		else
			ret = strict_strtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
static void parse_init(struct filter_parse_state *ps,
		       struct filter_op *ops,
		       char *infix_string)
{
	memset(ps, '\0', sizeof(*ps));

	ps->infix.string = infix_string;
	ps->infix.cnt = strlen(infix_string);
	ps->ops = ops;

	INIT_LIST_HEAD(&ps->opstack);
	INIT_LIST_HEAD(&ps->postfix);
}
static char infix_next(struct filter_parse_state *ps)
{
	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}

static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}

static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
static int filter_opstack_push(struct filter_parse_state *ps, int op)
{
	struct opstack_op *opstack_op;

	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
	if (!opstack_op)
		return -ENOMEM;

	opstack_op->op = op;
	list_add(&opstack_op->list, &ps->opstack);

	return 0;
}
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}

static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	int op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}

static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = OP_NONE;
	elt->operand = kstrdup(operand, GFP_KERNEL);
	if (!elt->operand) {
		kfree(elt);
		return -ENOMEM;
	}

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}
static int postfix_append_op(struct filter_parse_state *ps, int op)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = op;
	elt->operand = NULL;

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}
static void postfix_clear(struct filter_parse_state *ps)
{
	struct postfix_elt *elt;

	while (!list_empty(&ps->postfix)) {
		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
		list_del(&elt->list);
		kfree(elt->operand);
		kfree(elt);
	}
}
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
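/*
 * Example: parsing the infix string "a == 1 && b == 2" leaves the
 * postfix list as  "a" "1" == "b" "2" &&  (operands as strings, ops
 * as ids), which replace_preds() below consumes left to right.
 */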
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);

#ifdef CONFIG_FTRACE_STARTUP_TEST
	pred.field = field;
#endif
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE)
			continue;

		if (elt->op == OP_AND || elt->op == OP_OR) {
			n_logical_preds++;
			continue;
		}
		n_normal_preds++;
	}

	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
static int count_preds(struct filter_parse_state *ps)
{
	struct postfix_elt *elt;
	int n_preds = 0;

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE)
			continue;
		n_preds++;
	}

	return n_preds;
}
struct check_pred_data {
	int count;
	int max;
};

static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			      int *err, void *data)
{
	struct check_pred_data *d = data;

	if (WARN_ON(d->count++ > d->max)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}
	return WALK_PRED_DEFAULT;
}
/*
 * The tree is walked when an event is filtered. If the tree is not
 * correctly built, it may cause an infinite loop. Check here that the
 * tree does indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leaves are only hit a single time.
		 */
		.max   = 3 * filter->n_preds,
		.count = 0,
	};

	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
			  int *err, void *data)
{
	int *count = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID))
		(*count)++;

	return WALK_PRED_DEFAULT;
}

static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
{
	int count = 0, ret;

	ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
	WARN_ON(ret);
	return count;
}
struct fold_pred_data {
	struct filter_pred *root;
	int count;
	int children;
};

static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root  = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}

/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together, speeding up the filter logic.
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
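/*
 * Example: "a == 1 || b == 1 || c == 1" first builds a two-level tree
 * of ORs; folding flattens it so the top OR carries ops[] = {a, b, c}
 * and val = 3, letting process_ops() scan the leaves in one loop.
 */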
static int replace_preds(struct ftrace_event_call *call,
			 struct event_filter *filter,
			 struct filter_parse_state *ps,
			 char *filter_string,
			 bool dry_run)
{
	char *operand1 = NULL, *operand2 = NULL;
	struct filter_pred *pred;
	struct filter_pred *root;
	struct postfix_elt *elt;
	struct pred_stack stack = { }; /* init to NULL */
	int err;
	int n_preds = 0;

	n_preds = count_preds(ps);
	if (n_preds >= MAX_FILTER_PRED) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	err = check_preds(ps);
	if (err)
		return err;

	if (!dry_run) {
		err = __alloc_pred_stack(&stack, n_preds);
		if (err)
			return err;
		err = __alloc_preds(filter, n_preds);
		if (err)
			goto fail;
	}

	n_preds = 0;
	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			if (!operand1)
				operand1 = elt->operand;
			else if (!operand2)
				operand2 = elt->operand;
			else {
				parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
				err = -EINVAL;
				goto fail;
			}
			continue;
		}

		if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
			parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
			err = -ENOSPC;
			goto fail;
		}

		pred = create_pred(ps, call, elt->op, operand1, operand2);
		if (!pred) {
			err = -EINVAL;
			goto fail;
		}

		if (!dry_run) {
			err = filter_add_pred(ps, filter, pred, &stack);
			if (err)
				goto fail;
		}

		operand1 = operand2 = NULL;
	}

	if (!dry_run) {
		/* We should have one item left on the stack */
		pred = __pop_pred_stack(&stack);
		if (!pred)
			return -EINVAL;
		/* This item is where we start from in matching */
		root = pred;
		/* Make sure the stack is empty */
		pred = __pop_pred_stack(&stack);
		if (WARN_ON(pred)) {
			err = -EINVAL;
			filter->root = NULL;
			goto fail;
		}
		err = check_pred_tree(filter, root);
		if (err)
			goto fail;

		/* Optimize the tree */
		err = fold_pred_tree(filter, root);
		if (err)
			goto fail;

		/* We don't set root until we know it works */
		barrier();
		filter->root = root;
	}

	err = 0;
fail:
	__free_pred_stack(&stack);
	return err;
}
struct filter_list {
	struct list_head	list;
	struct event_filter	*filter;
};
static int replace_system_preds(struct event_subsystem *system,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(call, &ftrace_events, list) {

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 *  (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
		else
			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
	}

	list_for_each_entry(call, &ftrace_events, list) {
		struct event_filter *filter;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(call);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			call->flags |= TRACE_EVENT_FL_FILTERED;
		/*
		 * Regardless of whether this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = call->filter;
		rcu_assign_pointer(call->filter, filter_item->filter);
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (ps) {
		filter_opstack_clear(ps);
		postfix_clear(ps);
		kfree(ps);
	}
}
/**
 * create_filter - create a filter for a ftrace_event_call
 * @call: ftrace_event_call to create a filter for
 * @filter_str: filter string
 * @set_str: remember @filter_str and enable detailed error in filter
 * @filterp: out param for created filter (always updated on return)
 *
 * Creates a filter for @call with @filter_str.  If @set_str is %true,
 * @filter_str is copied and recorded in the new filter.
 *
 * On success, returns 0 and *@filterp points to the new filter.  On
 * failure, returns -errno and *@filterp may point to %NULL or to a new
 * filter.  In the latter case, the returned filter contains error
 * information if @set_str is %true and the caller is responsible for
 * freeing it.
 */
static int create_filter(struct ftrace_event_call *call,
			 char *filter_str, bool set_str,
			 struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, set_str, &ps, &filter);
	if (!err) {
		err = replace_preds(call, filter, ps, filter_str, false);
		if (err && set_str)
			append_filter_err(ps, filter);
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
/**
 * create_system_filter - create a filter for an event_subsystem
 * @system: event_subsystem to create a filter for
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it creates a subsystem filter
 * and always remembers @filter_str.
 */
static int create_system_filter(struct event_subsystem *system,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(system, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(call);
		filter = call->filter;
		if (!filter)
			goto out_unlock;
		RCU_INIT_POINTER(call->filter, NULL);
		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp = call->filter;

		if (!err)
			call->flags |= TRACE_EVENT_FL_FILTERED;
		else
			filter_disable(call);

		rcu_assign_pointer(call->filter, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
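/*
 * Userspace reaches this through a per-event "filter" file, e.g.
 * (paths vary with where tracefs/debugfs is mounted):
 *
 *	echo 'prev_prio < 100' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter   (clears, handled above)
 */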
int apply_subsystem_event_filter(struct event_subsystem *system,
				 char *filter_string)
{
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!system->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(system);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(system, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter,
		 * so we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
#ifdef CONFIG_PERF_EVENTS

void ftrace_profile_free_filter(struct perf_event *event)
{
	struct event_filter *filter = event->filter;

	event->filter = NULL;
	__free_filter(filter);
}

int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (!err)
		event->filter = filter;
	else
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}

#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_FTRACE_STARTUP_TEST

#include <linux/types.h>
#include <linux/tracepoint.h>

#define CREATE_TRACE_POINTS
#include "trace_events_filter_test.h"

#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO  0

static struct test_filter_data_t {
	char *filter;
	struct ftrace_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
#undef FILTER
};

#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
static int test_pred_visited;
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leaves should have field defined");
			return WALK_PRED_DEFAULT;
		}
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}

late_initcall(ftrace_test_event_filter);

#endif /* CONFIG_FTRACE_STARTUP_TEST */