/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* Copy the event piecewise, handling the wrap at the buffer end. */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read event from ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
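
/*
 * Illustrative sketch (not used in this file) of the read loop described
 * above, for a single ring buffer; process_event() is a placeholder for
 * whatever the caller does with each record:
 *
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map) < 0)
 *		return;
 *	while ((event = perf_mmap__read_event(map)) != NULL) {
 *		process_event(event);
 *		perf_mmap__consume(map);
 *	}
 *	perf_mmap__read_done(map);
 */
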
static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;
	map->cpu = cpu;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
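
/*
 * Illustrative timeline (a sketch, not code in this file) of the two
 * references taken above, assuming the evlist layer drops its reference
 * from perf_evlist__filter_pollfd() when the fd reports POLLHUP:
 *
 *	perf_mmap__mmap(map, &mp, fd, cpu);	refcnt == 2
 *	...					fd reports POLLHUP
 *	perf_mmap__put(map);			evlist ref dropped, refcnt == 1
 *	...					tool keeps reading
 *	perf_mmap__consume(map);		refcnt == 1 && buffer empty,
 *						so the final put() unmaps it
 */
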
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);

	pheader = (struct perf_event_header *)(buf + (*start & mask));

	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}

	WARN_ONCE(1, "Shouldn't get here\n");

	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if (md->start == md->end)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = md->end - md->start;

	/* Data wraps around the end of the buffer: push the first chunk. */
	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct map->prev to the current head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
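
/*
 * Illustrative sketch of what the bookkeeping above means for two
 * consecutive overwrite-mode reads, assuming the ring buffer is paused
 * while it is being read:
 *
 *	perf_mmap__read_init(map);	start = head0, end = map->prev
 *	... read events ...
 *	perf_mmap__read_done(map);	map->prev = head0
 *
 *	perf_mmap__read_init(map);	start = head1, end = head0
 *	... only records written after the first pass are seen ...
 *	perf_mmap__read_done(map);	map->prev = head1
 */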