/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

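/*
 * A perf ring buffer mapping consists of one control page
 * (struct perf_event_mmap_page) followed by 2^n data pages, hence
 * the total length below: the data size (mask + 1) plus one page.
 */
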
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a valid entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* copy in (at most two) chunks, handling the wrap */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

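/*
 * Note on the copy above: an event that straddles the end of the ring
 * is reassembled into map->event_copy so callers always see one
 * contiguous record; *startp is advanced past each returned event.
 */
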
/*
 * legacy interface for mmap read.
 * Don't use it. Use perf_mmap__read_event().
 */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	u64 head;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, &map->prev, head);
}

/*
 * Read event from ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
					bool overwrite,
					u64 *startp, u64 end)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	if (startp == NULL)
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!overwrite)
		end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, startp, end);

	if (!overwrite)
		map->prev = *startp;

	return event;
}

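/*
 * Example (a sketch, assuming a non-overwrite mapping and a
 * caller-provided process() handler; error handling elided):
 *
 *	u64 start, end;
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map, false, &start, &end) == 0) {
 *		while ((event = perf_mmap__read_event(map, false, &start, end)))
 *			process(event), perf_mmap__consume(map, false);
 *		perf_mmap__read_done(map);
 *	}
 */
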
static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

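/*
 * Reference counting: perf_mmap__mmap() starts the count at 2; one
 * reference belongs to the evlist, the other is only dropped once the
 * buffer is empty and consumed (see the comment in perf_mmap__mmap()
 * below). The buffer is unmapped when the count reaches zero.
 */
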
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

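/*
 * Writing the tail publishes the consumed position back to the kernel,
 * which may then reuse that space for new events; in overwrite mode the
 * kernel ignores the tail, so there is nothing to write.
 */
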
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

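/*
 * The __weak stubs above are no-op defaults; when AUX area tracing
 * support is compiled in, the real implementations in util/auxtrace.c
 * override them.
 */
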
void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

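/*
 * After munmap the refcount is pinned at zero, so the readers' entry
 * checks (refcount_read() above) bail out instead of touching the
 * unmapped ring.
 */
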
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

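/*
 * The scan above walks event headers forward from 'head' until it
 * either wraps a full buffer length or reaches a zero-sized header,
 * yielding the [*start, *end) window of records that can still be
 * parsed out of a full overwrite ring.
 */
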
/*
 * Report the start and end of the available data in ringbuffer
 */
int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
			 u64 *startp, u64 *endp)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	*startp = overwrite ? head : old;
	*endp = overwrite ? old : head;

	if (*startp == *endp)
		return -EAGAIN;

	size = *endp - *startp;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
			return -EINVAL;
	}

	return 0;
}

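/*
 * perf_mmap__push() drains everything between start and end through the
 * caller-supplied push() callback, splitting the data into at most two
 * contiguous chunks at the wrap point.
 */
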
int perf_mmap__push(struct perf_mmap *md, bool overwrite,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 end, start;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md, overwrite, &start, &end);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = end - start;

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite);
out:
	return rc;
}

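/*
 * A minimal sketch of a push() callback, assuming 'to' carries an open
 * file descriptor (perf's own record__pushfn() in builtin-record.c
 * fills this role):
 *
 *	static int pushfn(void *to, void *buf, size_t size)
 *	{
 *		return write(*(int *)to, buf, size) == (ssize_t)size ? 0 : -1;
 *	}
 */
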
/*
 * Mandatory for overwrite mode
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct the map->prev to head which is the end of next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	map->prev = perf_mmap__read_head(map);
}