#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
        void             *base;
        int              mask;
        refcount_t       refcnt;
        struct auxtrace_mmap auxtrace_mmap;
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
static inline size_t
perf_mmap__mmap_len(struct perf_mmap *map)
{
        return map->mask + 1 + page_size;
}
/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                      V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
        BKW_MMAP_NOTREADY,
        BKW_MMAP_RUNNING,
        BKW_MMAP_DATA_PENDING,
        BKW_MMAP_EMPTY,
};
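/*
 * Illustrative sketch (not part of this header): the numbered transitions
 * in the diagram above map onto perf_evlist__toggle_bkw_mmap() calls,
 * declared further down in this file. "evlist" and the read step are
 * placeholders.
 *
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);      // (0) start recording
 *	...
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING); // (1) pause for reading
 *	...read the backward ring buffers...
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);        // (2) data collected
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);      // (3) resume recording
 */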
struct perf_evlist {
        struct list_head entries;
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
        int              nr_entries;
        int              nr_mmaps;
        u64              combined_sample_type;
        enum bkw_mmap_state bkw_mmap_state;
        struct fdarray   pollfd;
        struct perf_mmap *mmap;
        struct perf_mmap *backward_mmap;
        struct thread_map *threads;
        struct cpu_map   *cpus;
        struct perf_evsel *selected;
        struct events_stats stats;
        struct perf_env  *env;
};
struct perf_evsel_str_handler {
        const char *name;
        void       *handler;
};
struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);
int perf_evlist__add_default(struct perf_evlist *evlist);
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs);

#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
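/*
 * Illustrative usage sketch (not part of this header): with a local attr
 * array, the wrapper macro above supplies ARRAY_SIZE() automatically. The
 * "default_attrs" array and the surrounding function are hypothetical.
 *
 *	struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, default_attrs) < 0)
 *		return -1;
 */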
int perf_evlist__add_dummy(struct perf_evlist *evlist);

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler);

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
                                   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
                                     enum perf_event_sample_format bit);

#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
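/*
 * Illustrative usage sketch (not part of this header): the token pasting
 * above lets callers name just the PERF_SAMPLE_* suffix, e.g.:
 *
 *	perf_evlist__set_sample_bit(evlist, CPU);	// PERF_SAMPLE_CPU
 *	perf_evlist__reset_sample_bit(evlist, TIME);	// PERF_SAMPLE_TIME
 */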
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name);
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                           struct perf_evsel *evsel,
                           int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
                                                u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);

void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
                                                 int idx);
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
                                                  int idx);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
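/*
 * Illustrative lifecycle sketch (not part of this header), with error
 * handling trimmed; "opts", "done" and process_event() are placeholders.
 * A typical tool wires the declarations in this file together roughly
 * like this:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (perf_evlist__create_maps(evlist, &opts.target) < 0 ||
 *	    perf_evlist__open(evlist) < 0 ||
 *	    perf_evlist__mmap(evlist, opts.mmap_pages, false) < 0)
 *		goto out_delete;
 *
 *	perf_evlist__enable(evlist);
 *	while (!done) {
 *		union perf_event *event;
 *		int i;
 *
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *				process_event(event);
 *				perf_evlist__mmap_consume(evlist, i);
 *			}
 *		}
 *		perf_evlist__poll(evlist, 100);
 *	}
 *	perf_evlist__disable(evlist);
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 * out_delete:
 *	perf_evlist__delete(evlist);
 */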
struct callchain_param;
void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
                         struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct target *target,
                                  const char *argv[], bool pipe_output,
                                  void (*exec_error)(int signo, siginfo_t *info,
                                                     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);
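/*
 * Illustrative sketch (not part of this header): the workload is prepared
 * (forked but held) before the events are set up, then released once
 * everything is enabled. "rec_argv" and the NULL exec_error handler are
 * placeholders.
 *
 *	if (perf_evlist__prepare_workload(evlist, &opts.target, rec_argv,
 *					  false, NULL) < 0)
 *		return -1;
 *	...open, mmap and enable the events...
 *	perf_evlist__start_workload(evlist);	// lets the forked child run
 */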
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
                                  const char *str,
                                  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         bool overwrite, unsigned int auxtrace_pages,
                         bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);
void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel);

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
                           struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);
u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample);

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list);
static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
        return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
        return list_entry(evlist->entries.prev, struct perf_evsel, node);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
        struct perf_event_mmap_page *pc = mm->base;
        u64 head = ACCESS_ONCE(pc->data_head);
        rmb();
        return head;
}
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
        struct perf_event_mmap_page *pc = md->base;

        /*
         * ensure all reads are done before we write the tail out.
         */
        mb();
        pc->data_tail = tail;
}
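/*
 * Illustrative sketch (not part of this header): a raw consumer pairs the
 * two helpers above -- read data_head, consume everything up to it, then
 * publish the new tail so the kernel may reuse that space. The consumption
 * step is a placeholder.
 *
 *	u64 head = perf_mmap__read_head(map);	// ordered read of data_head
 *	...consume records between the old tail and head...
 *	perf_mmap__write_tail(map, head);	// barrier, then store data_tail
 */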
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
                           struct perf_evsel *move_evsel);
/**
 * __evlist__for_each_entry - iterate thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 * @tmp: struct evsel temp iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
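/*
 * Illustrative usage sketch (not part of this header): walking an evlist
 * with the iterator above; perf_evsel__name() is assumed from evsel.h.
 *
 *	struct perf_evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel)
 *		pr_debug("%s\n", perf_evsel__name(evsel));
 */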
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                            union perf_event *event);

#endif /* __PERF_EVLIST_H */