#include "parse-options.h"
#include "parse-events.h"

extern char *strcasestr(const char *haystack, const char *needle);
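/*
 * Attributes of the counters requested on the command line: parse_events()
 * below fills one attrs[] slot per parsed event, indexed by nr_counters.
 */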
struct perf_counter_attr attrs[MAX_COUNTERS];

char debugfs_path[MAXPATHLEN];

#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
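/*
 * CHW()/CSW() expand to designated initializers for the type and config
 * members of the event_symbols[] entries below, so each table row only
 * names the PERF_COUNT_HW_xxx / PERF_COUNT_SW_xxx suffix once.
 */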
static struct event_symbol event_symbols[] = {
        { CHW(CPU_CYCLES),              "cpu-cycles",           "cycles"     },
        { CHW(INSTRUCTIONS),            "instructions",         ""           },
        { CHW(CACHE_REFERENCES),        "cache-references",     ""           },
        { CHW(CACHE_MISSES),            "cache-misses",         ""           },
        { CHW(BRANCH_INSTRUCTIONS),     "branch-instructions",  "branches"   },
        { CHW(BRANCH_MISSES),           "branch-misses",        ""           },
        { CHW(BUS_CYCLES),              "bus-cycles",           ""           },

        { CSW(CPU_CLOCK),               "cpu-clock",            ""           },
        { CSW(TASK_CLOCK),              "task-clock",           ""           },
        { CSW(PAGE_FAULTS),             "page-faults",          "faults"     },
        { CSW(PAGE_FAULTS_MIN),         "minor-faults",         ""           },
        { CSW(PAGE_FAULTS_MAJ),         "major-faults",         ""           },
        { CSW(CONTEXT_SWITCHES),        "context-switches",     "cs"         },
        { CSW(CPU_MIGRATIONS),          "cpu-migrations",       "migrations" },
#define __PERF_COUNTER_FIELD(config, name) \
        ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)    __PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)   __PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)     __PERF_COUNTER_FIELD(config, EVENT)
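/*
 * __PERF_COUNTER_FIELD() extracts one bit field from a raw config value
 * using the matching PERF_COUNTER_<name>_MASK / PERF_COUNTER_<name>_SHIFT
 * pair; the wrappers above just name the individual fields.
 */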
static char *hw_event_names[] = {

static char *sw_event_names[] = {

static char *hw_cache[][MAX_ALIASES] = {
        { "L1-dcache",  "l1-d",         "l1d",          "L1-data",          },
        { "L1-icache",  "l1-i",         "l1i",          "L1-instruction",   },
        { "dTLB",       "d-tlb",        "Data-TLB",                         },
        { "iTLB",       "i-tlb",        "Instruction-TLB",                  },
        { "branch",     "branches",     "bpu",          "btb",      "bpc",  },

static char *hw_cache_op[][MAX_ALIASES] = {
        { "load",       "loads",        "read",                             },
        { "store",      "stores",       "write",                            },
        { "prefetch",   "prefetches",   "speculative-read", "speculative-load", },

static char *hw_cache_result[][MAX_ALIASES] = {
        { "refs",       "Reference",    "ops",          "access",           },
        { "misses",     "miss",                                             },

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)]  = (CACHE_READ),
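/*
 * Each entry above is a bitmask of the operations a cache type supports;
 * is_cache_op_valid() below tests a (type, op) pair against it, so e.g.
 * prefetch counts on the ITLB are rejected.
 */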
#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st)        \
        while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)    \
        if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path,              \
                     sys_dirent.d_name) &&                                 \
           (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&                  \
           (strcmp(sys_dirent.d_name, ".")) &&                             \
           (strcmp(sys_dirent.d_name, "..")))
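/*
 * for_each_subsystem() above walks the subsystem directories under
 * debugfs_path, skipping "." and ".." and anything that is not a directory;
 * tp_event_has_id() below checks whether one event directory exposes an
 * "id" file, and for_each_event() uses it to visit only the real tracepoint
 * events inside a subsystem.
 */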
static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
        char evt_path[MAXPATHLEN];

        snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
                 sys_dir->d_name, evt_dir->d_name);
        fd = open(evt_path, O_RDONLY);

#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \
        while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)     \
        if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path,            \
                     sys_dirent.d_name, evt_dirent.d_name) &&               \
           (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&                   \
           (strcmp(evt_dirent.d_name, ".")) &&                              \
           (strcmp(evt_dirent.d_name, "..")) &&                             \
           (!tp_event_has_id(&sys_dirent, &evt_dirent)))

#define MAX_EVENT_LENGTH 30
int valid_debugfs_mount(const char *debugfs)
{
        if (statfs(debugfs, &st_fs) < 0)

        else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
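/*
 * tracepoint_id_to_name() maps a numeric tracepoint config back to its
 * "subsystem:event" name by scanning the per-event "id" files under the
 * debugfs tracing directory.
 */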
static char *tracepoint_id_to_name(u64 config)
{
        static char tracepoint_name[2 * MAX_EVENT_LENGTH];
        DIR *sys_dir, *evt_dir;
        struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
        char evt_path[MAXPATHLEN];

        if (valid_debugfs_mount(debugfs_path))

        sys_dir = opendir(debugfs_path);

        for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
                evt_dir = opendir(evt_path);

                for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
                               evt_path, st) {
                        snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
                                 debugfs_path, sys_dirent.d_name,
                                 evt_dirent.d_name);
                        fd = open(evt_path, O_RDONLY);

                        if (read(fd, id_buf, sizeof(id_buf)) < 0) {

                        snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH,
                                 "%s:%s", sys_dirent.d_name,
                                 evt_dirent.d_name);
                        return tracepoint_name;

static int is_cache_op_valid(u8 cache_type, u8 cache_op)
{
        if (hw_cache_stat[cache_type] & COP(cache_op))
                return 1;       /* valid */

        return 0;       /* invalid */
static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
{
        static char name[50];

        sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
                hw_cache_op[cache_op][0],
                hw_cache_result[cache_result][0]);

        sprintf(name, "%s-%s", hw_cache[cache_type][0],
                hw_cache_op[cache_op][1]);
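/*
 * event_name()/__event_name() below do the reverse of parsing: they turn a
 * (type, config) pair back into a printable string, using the primary
 * aliases from the tables above for cache events (e.g. the L1 data cache
 * read-miss event comes out as "L1-dcache-load-misses").
 */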
char *event_name(int counter)
{
        u64 config = attrs[counter].config;
        int type = attrs[counter].type;

        return __event_name(type, config);
}

char *__event_name(int type, u64 config)
{
        if (type == PERF_TYPE_RAW) {
                sprintf(buf, "raw 0x%llx", config);

        case PERF_TYPE_HARDWARE:
                if (config < PERF_COUNT_HW_MAX)
                        return hw_event_names[config];
                return "unknown-hardware";

        case PERF_TYPE_HW_CACHE: {
                u8 cache_type, cache_op, cache_result;

                cache_type = (config >> 0) & 0xff;
                if (cache_type > PERF_COUNT_HW_CACHE_MAX)
                        return "unknown-ext-hardware-cache-type";

                cache_op = (config >> 8) & 0xff;
                if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
                        return "unknown-ext-hardware-cache-op";

                cache_result = (config >> 16) & 0xff;
                if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
                        return "unknown-ext-hardware-cache-result";

                if (!is_cache_op_valid(cache_type, cache_op))
                        return "invalid-cache";

                return event_cache_name(cache_type, cache_op, cache_result);
        }

        case PERF_TYPE_SOFTWARE:
                if (config < PERF_COUNT_SW_MAX)
                        return sw_event_names[config];
                return "unknown-software";

        case PERF_TYPE_TRACEPOINT:
                return tracepoint_id_to_name(config);
static int parse_aliases(const char **str, char *names[][MAX_ALIASES], int size)
{
        for (i = 0; i < size; i++) {
                for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
                        n = strlen(names[i][j]);
                        if (n > longest && !strncasecmp(*str, names[i][j], n))
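/*
 * parse_generic_hw_event() below builds a PERF_TYPE_HW_CACHE attribute from
 * strings such as "L1-dcache-load-misses": parse_aliases() matches the cache
 * type, operation and result against the alias tables (case-insensitively,
 * preferring the longest match), missing parts fall back to read accesses,
 * and the three fields are packed into attr->config.
 */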
parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
{
        const char *s = *str;
        int cache_type = -1, cache_op = -1, cache_result = -1;

        cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
        /*
         * No fallback - if we cannot get a clear cache type
         */
        if (cache_type == -1)

        while ((cache_op == -1 || cache_result == -1) && *s == '-') {

                if (cache_op == -1) {
                        cache_op = parse_aliases(&s, hw_cache_op,
                                                 PERF_COUNT_HW_CACHE_OP_MAX);
                        if (!is_cache_op_valid(cache_type, cache_op))

                if (cache_result == -1) {
                        cache_result = parse_aliases(&s, hw_cache_result,
                                                     PERF_COUNT_HW_CACHE_RESULT_MAX);
                        if (cache_result >= 0)

                /*
                 * Can't parse this as a cache op or result, so back up
                 */

        /*
         * Fall back to reads:
         */
        if (cache_op == -1)
                cache_op = PERF_COUNT_HW_CACHE_OP_READ;

        /*
         * Fall back to accesses:
         */
        if (cache_result == -1)
                cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

        attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
        attr->type = PERF_TYPE_HW_CACHE;
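/*
 * Tracepoint events are written as "subsystem:event" and resolved against
 * the directories under <debugfs_mount_point>/tracing/events; an extra
 * ":record" suffix adds PERF_SAMPLE_RAW to the sample type so the raw trace
 * data is captured ("sched:sched_switch" would be a typical example,
 * assuming that tracepoint exists on the running kernel).
 */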
static int parse_tracepoint_event(const char **strp,
                                  struct perf_counter_attr *attr)
{
        const char *evt_name;
        char sys_name[MAX_EVENT_LENGTH];
        unsigned int sys_length, evt_length;
        char evt_path[MAXPATHLEN];

        if (valid_debugfs_mount(debugfs_path))

        evt_name = strchr(*strp, ':');

        sys_length = evt_name - *strp;
        if (sys_length >= MAX_EVENT_LENGTH)

        strncpy(sys_name, *strp, sys_length);
        sys_name[sys_length] = '\0';
        evt_name = evt_name + 1;

        flags = strchr(evt_name, ':');

        if (!strncmp(flags, "record", strlen(flags)))
                attr->sample_type |= PERF_SAMPLE_RAW;

        evt_length = strlen(evt_name);
        if (evt_length >= MAX_EVENT_LENGTH)

        snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
                 sys_name, evt_name);
        fd = open(evt_path, O_RDONLY);

        if (read(fd, id_buf, sizeof(id_buf)) < 0) {

        attr->type = PERF_TYPE_TRACEPOINT;
        *strp = evt_name + evt_length;
static int check_events(const char *str, unsigned int i)
{
        n = strlen(event_symbols[i].symbol);
        if (!strncmp(str, event_symbols[i].symbol, n))

        n = strlen(event_symbols[i].alias);
        if (!strncmp(str, event_symbols[i].alias, n))

parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
{
        const char *str = *strp;

        for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
                n = check_events(str, i);

                attr->type = event_symbols[i].type;
                attr->config = event_symbols[i].config;
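/*
 * Raw hardware events are given as a hexadecimal config value: hex2u64()
 * parses the digits after a one-character prefix (perf's usual syntax is
 * "r" followed by the hex code, e.g. "r1a8").
 */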
static int parse_raw_event(const char **strp, struct perf_counter_attr *attr)
{
        const char *str = *strp;

        n = hex2u64(str + 1, &config);

        attr->type = PERF_TYPE_RAW;
        attr->config = config;

parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
{
        const char *str = *strp;

        type = strtoul(str, &endp, 0);
        if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
                config = strtoul(str, &endp, 0);

                attr->config = config;
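/*
 * parse_event_modifier() below handles an optional modifier suffix made of
 * the letters 'u', 'k' and 'h': each listed letter clears the corresponding
 * exclude bit (user, kernel, hypervisor) while the unlisted levels stay
 * excluded, so "cycles:k" would count in kernel mode only.
 */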
parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
{
        const char *str = *strp;
        int eu = 1, ek = 1, eh = 1;

        else if (*str == 'k')

        else if (*str == 'h')

        if (str >= *strp + 2) {

                attr->exclude_user   = eu;
                attr->exclude_kernel = ek;
                attr->exclude_hv     = eh;
/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
{
        if (!(parse_tracepoint_event(str, attr) ||
              parse_raw_event(str, attr) ||
              parse_numeric_event(str, attr) ||
              parse_symbolic_event(str, attr) ||
              parse_generic_hw_event(str, attr)))

        parse_event_modifier(str, attr);
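/*
 * parse_events() is the callback behind the -e option: it accepts symbolic
 * names from event_symbols[] ("cycles", "faults", ...), generic cache events
 * ("L1-dcache-load-misses"), raw codes ("r1a8"), numeric "type:config"
 * pairs, tracepoints ("subsystem:event") and an optional privilege modifier,
 * and appends the resulting attribute to attrs[].
 */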
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
        struct perf_counter_attr attr;

        if (nr_counters == MAX_COUNTERS)

        memset(&attr, 0, sizeof(attr));
        if (!parse_event_symbols(&str, &attr))

        if (!(*str == 0 || *str == ',' || isspace(*str)))

        attrs[nr_counters] = attr;

        while (isspace(*str))
static const char * const event_type_descriptors[] = {
        "Hardware cache event",

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */
static void print_tracepoint_events(void)
{
        DIR *sys_dir, *evt_dir;
        struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
        char evt_path[MAXPATHLEN];

        if (valid_debugfs_mount(debugfs_path))

        sys_dir = opendir(debugfs_path);

        for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
                evt_dir = opendir(evt_path);

                for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
                               evt_path, st) {
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
                                 sys_dirent.d_name, evt_dirent.d_name);
                        fprintf(stderr, " %-40s [%s]\n", evt_path,
                                event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
/*
 * Print the help text for the event symbols:
 */
void print_events(void)
{
        struct event_symbol *syms = event_symbols;
        unsigned int i, type, op, prev_type = -1;

        fprintf(stderr, "\n");
        fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

        for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
                type = syms->type + 1;
                if (type >= ARRAY_SIZE(event_type_descriptors))

                if (type != prev_type)
                        fprintf(stderr, "\n");

                if (strlen(syms->alias))
                        sprintf(name, "%s OR %s", syms->symbol, syms->alias);
                else
                        strcpy(name, syms->symbol);
                fprintf(stderr, " %-40s [%s]\n", name,
                        event_type_descriptors[type]);

        fprintf(stderr, "\n");
        for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
                for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
                        /* skip invalid cache type */
                        if (!is_cache_op_valid(type, op))

                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
                                fprintf(stderr, " %-40s [%s]\n",
                                        event_cache_name(type, op, i),
                                        event_type_descriptors[4]);

        fprintf(stderr, "\n");
        fprintf(stderr, " %-40s [raw hardware event descriptor]\n",

        fprintf(stderr, "\n");

        print_tracepoint_events();