// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <errno.h>
#include <string.h>
#include <linux/ring_buffer.h>
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
		     bool overwrite, libperf_unmap_cb_t unmap_cb)
{
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb  = unmap_cb;
	refcount_set(&map->refcnt, 0);
	if (prev)
		prev->next = map;
}
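
/*
 * Note: the mapped area is one control page (struct perf_event_mmap_page,
 * through which the kernel exports the head/tail positions) followed by
 * the data ring of map->mask + 1 bytes, hence the length computed below.
 */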
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, int cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd  = fd;
	map->cpu = cpu;
	return 0;
}
void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map && map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map && map->unmap_cb)
		map->unmap_cb(map);
}
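
/*
 * Reference counting: users take a reference with perf_mmap__get(); the
 * final perf_mmap__put() tears the mapping down via perf_mmap__munmap().
 */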
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}
void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}
u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}
static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}
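
/*
 * Consuming in the forward (non-overwrite) case publishes the new tail,
 * telling the kernel it may reuse that space; the last holder of an empty
 * ring also drops its reference here.
 */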
void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
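
/*
 * When a backward (overwrite) ring is full, walk the record headers forward
 * from 'start' until the walk wraps past one full ring size or hits a
 * zero-sized header, to recover a readable [start, end) window.
 */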
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));

	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
/*
 * Report the start and end of the available data in the ring buffer
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}
int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}
/*
 * Mandatory for overwrite mode
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct the map->prev to head which is the end of next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}
/*
 * Read event from ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
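
/*
 * A minimal sketch of the read loop documented above. The drain_mmap()
 * helper and the process_event() callback are hypothetical, not part of
 * the libperf API; the loop itself follows the Usage comment before
 * perf_mmap__read_event(). Kept under #if 0 as it is illustration only.
 */
#if 0
static void drain_mmap(struct perf_mmap *map,
		       void (*process_event)(union perf_event *event))
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;		/* nothing to read, or ring already unmapped */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		process_event(event);
		/* release the space so the kernel can write new records */
		perf_mmap__consume(map);
	}

	/* mandatory in overwrite mode: resync map->prev with the head */
	perf_mmap__read_done(map);
}
#endif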