/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */
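
/*
 * The mapping covers the data area of the ring buffer (map->mask + 1 bytes,
 * a power of two) plus one extra page at the start that holds the kernel's
 * struct perf_event_mmap_page control header.
 */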
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
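
/*
 * Read a single event from the range [start, end) of the data area. When a
 * record wraps past the end of the data area it is reassembled into
 * map->event_copy, so callers always see a contiguous record. Returns NULL
 * when no complete event is available; when 'prev' is non-NULL it is
 * updated with the new read position.
 */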
/* When check_messup is true, 'end' must point to a good entry. */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 start, u64 end, u64 *prev)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & map->mask) + size != ((start + size) & map->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	u64 head;
	u64 old = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, old, head, &map->prev);
}
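
/*
 * Backward (overwrite) reading: the kernel writes records at decreasing
 * offsets and keeps decrementing 'head', so the reader walks from its
 * previous position towards an 'end' derived from the current head rather
 * than towards the head itself.
 */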
union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
	u64 head, end;
	u64 start = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);
	if (!head)
		return NULL;

	/*
	 * 'head' starts from 0 and the kernel subtracts sizeof(record) from
	 * it on every write, so in fact 'head' is negative. 'end' is made
	 * manually by adding the size of the ring buffer to 'head', meaning
	 * the valid data that can be read spans the whole ring buffer. If
	 * 'end' would be positive, the ring buffer has not been fully
	 * filled, so we must adjust 'end' to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't simply
	 * compare 'end' against 0. Here we compare '-head' with the size of
	 * the ring buffer, where '-head' is the number of bytes the kernel
	 * has written to the ring buffer.
	 */
	if (-head < (u64)(map->mask + 1))
		end = 0;
	else
		end = head + map->mask + 1;

	return perf_mmap__read(map, start, end, &map->prev);
}
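
/*
 * Re-synchronize the reader with the kernel: record the current head in
 * map->prev so that the next backward read starts from the most recently
 * written data.
 */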
void perf_mmap__read_catchup(struct perf_mmap *map)
{
	u64 head;

	if (!refcount_read(&map->refcnt))
		return;

	head = perf_mmap__read_head(map);
	map->prev = head;
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}
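
/*
 * Reference counting: perf_mmap__get() and perf_mmap__put() pair up around
 * each user of the map; the final put tears the mapping down via
 * perf_mmap__munmap().
 */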
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
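
/*
 * Default no-op implementations of the AUX area tracing hooks; builds with
 * auxtrace support provide strong definitions that override these __weak
 * stubs.
 */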
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
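
/*
 * Map the ring buffer for 'fd'. The refcount starts at two; the comment
 * below explains why the second reference is only dropped once every last
 * event has been consumed.
 */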
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
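
/*
 * When an overwrite ring buffer is full, find the range of valid records:
 * walk the record headers forward from 'head' until either one full buffer
 * size has been covered (then rewind to the last complete record) or a
 * zero-sized header is hit, and report the result through *start and *end.
 */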
static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
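
/*
 * Push all unconsumed data to the 'push' callback, splitting it into two
 * chunks when the region wraps past the end of the data area. For example
 * (illustrative numbers only): with mask = 0xffff, start = 0xfff0 and
 * end = 0x10010, the first push covers 0x10 bytes at offset 0xfff0 and the
 * second the remaining 0x10 bytes at offset 0.
 */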
int perf_mmap__push(struct perf_mmap *md, bool overwrite,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	start = overwrite ? head : old;
	end = overwrite ? old : head;

	if (start == end)
		return 0;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return 0;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
			return -1;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite);
out:
	return rc;
}