tools/perf/util/mmap.c
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */
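
/*
 * The kernel maps one control page (struct perf_event_mmap_page, which
 * holds the head and tail pointers) followed by a power-of-two data
 * area; map->mask is the data area size minus one, hence the extra
 * page_size below.
 */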
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
/* 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* copy the wrapped event piecewise into event_copy */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}
/*
 * legacy interface for mmap read.
 * Don't use it. Use perf_mmap__read_event().
 */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	u64 head;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, &map->prev, head);
}
/*
 * Read events from the ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while (event = perf_mmap__read_event()) {
 *	// process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
					bool overwrite,
					u64 *startp, u64 end)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	if (startp == NULL)
		return NULL;

	/* non-overwrite mode doesn't pause the ring buffer */
	if (!overwrite)
		end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, startp, end);

	if (!overwrite)
		map->prev = *startp;

	return event;
}
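
/*
 * Illustrative sketch (not part of the original source): the read loop
 * from the usage comment above, spelled out for the non-overwrite
 * case. The hypothetical example_read_loop() reduces event handling to
 * a debug printout.
 */
static void __maybe_unused example_read_loop(struct perf_mmap *map)
{
	union perf_event *event;
	u64 start, end;

	if (perf_mmap__read_init(map, false, &start, &end) < 0)
		return;		/* -EAGAIN: ring buffer is empty */

	while ((event = perf_mmap__read_event(map, false, &start, end)) != NULL) {
		/* stand-in for real event processing */
		pr_debug("event type %u\n", event->header.type);
		perf_mmap__consume(map, false);
	}

	perf_mmap__read_done(map);
}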
static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}
void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}
void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one (of the two references taken below) will be
	 * dropped at perf_evlist__mmap_consume(), so that we make sure
	 * we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
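
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might fill mmap_params for a ring buffer of 'pages' data pages. The
 * hypothetical example_mmap() assumes 'pages' is a power of two, as
 * the kernel requires, and requests no auxtrace area.
 */
static int __maybe_unused example_mmap(struct perf_mmap *map, int fd, int pages)
{
	struct mmap_params mp = {
		.prot = PROT_READ | PROT_WRITE,
		/* data area is pages * page_size bytes; mask is its size - 1 */
		.mask = pages * page_size - 1,
	};

	return perf_mmap__mmap(map, &mp, fd);
}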
/*
 * Find the [start, end) range of valid records in a full backward ring
 * buffer by walking record headers forward from 'head'.
 */
static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);

	pheader = (struct perf_event_header *)(buf + (head & mask));

	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
/*
 * Report the start and end of the available data in the ring buffer
 */
int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
			 u64 *startp, u64 *endp)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	*startp = overwrite ? head : old;
	*endp = overwrite ? old : head;

	if (*startp == *endp)
		return -EAGAIN;

	size = *endp - *startp;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
			return -EINVAL;
	}

	return 0;
}
int perf_mmap__push(struct perf_mmap *md, bool overwrite,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 end, start;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md, overwrite, &start, &end);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = end - start;

	if ((start & md->mask) + size != (end & md->mask)) {
		/* data wraps around the end of the buffer: push the first chunk */
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite);
out:
	return rc;
}
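
/*
 * Illustrative sketch (not part of the original source): a push()
 * callback for perf_mmap__push() that copies the chunks to a
 * caller-provided memory cursor; builtin-record uses the same shape of
 * callback to append chunks to the perf.data file.
 *
 * Invoked as: perf_mmap__push(md, false, &cursor, example_pushfn);
 */
static int __maybe_unused example_pushfn(void *to, void *buf, size_t size)
{
	unsigned char **cursor = to;	/* hypothetical output cursor */

	memcpy(*cursor, buf, size);
	*cursor += size;
	return 0;
}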
/*
 * Mandatory for overwrite mode
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * map->prev needs to be corrected back to head, which is the end of the
 * next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	map->prev = perf_mmap__read_head(map);
}
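
/*
 * Illustrative sketch (not part of the original source): where
 * perf_mmap__read_done() sits in an overwrite-mode snapshot. The
 * hypothetical example_overwrite_snapshot() assumes the caller paused
 * the ring buffer beforehand.
 */
static void __maybe_unused example_overwrite_snapshot(struct perf_mmap *map)
{
	union perf_event *event;
	u64 start, end;

	/* compute the valid [start, end) window; may rewind a full buffer */
	if (perf_mmap__read_init(map, true, &start, &end) < 0)
		return;

	while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL)
		perf_mmap__consume(map, true);

	/* reset map->prev to head so the next snapshot starts fresh */
	perf_mmap__read_done(map);
}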