// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
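/*
 * Illustrative walk-through (an assumed input, not code from this file):
 * queueing timestamps 10, 30, 20 yields the time ordered list
 * 10 -> 20 -> 30. The lookup for 20 starts at oe->last (the previously
 * queued event, 30) and walks backwards, so nearly sorted input is
 * placed in a few steps instead of a full list scan.
 */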
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}
static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}
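/*
 * oe->copy_on_queue is toggled with ordered_events__set_copy_on_queue()
 * from ordered-events.h. When it is off, no copy is made, so the caller
 * must keep the queued perf_event memory alive until delivery.
 */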
static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}
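/*
 * Sizing note (assuming a 64-bit build where struct ordered_event is
 * 40 bytes): MAX_SAMPLE_BUFFER below evaluates to 64K / 40 = 1638
 * ordered_event slots per allocation buffer.
 */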
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered events objects:
	 *    buffer -> event[0]
	 *              event[1]
	 *              ...
	 *
	 * Each allocated ordered event is linked to one of
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of ordered event object moves it from events to
	 * the cache list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del_init(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		return NULL;
	}

	new->event = new_event;
	return new;
}
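/*
 * Example of the allocation order above: the first call (empty cache,
 * no buffer) mallocs a buffer and hands out event[0]; subsequent calls
 * hand out event[1..MAX_SAMPLE_BUFFER-1]; once an event is deleted it
 * is recycled from the 'cache' list before any fresh slot is touched.
 */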
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		/* Allocation failed: flush half of the queue and retry once. */
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP  ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}
u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
	oe->data	   = data;
}
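/*
 * Illustrative usage sketch; 'process_event' and the surrounding names
 * are assumptions for the example, error handling is omitted:
 *
 *	static int process_event(struct ordered_events *oe,
 *				 struct ordered_event *event)
 *	{
 *		// consume event->event at time event->timestamp
 *		return 0;
 *	}
 *
 *	struct ordered_events oe;
 *
 *	ordered_events__init(&oe, process_event, NULL);
 *	while (have_events())
 *		ordered_events__queue(&oe, event, sample_time, file_offset);
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */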
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * Current buffer might not have all the events allocated
	 * yet, we need to free only allocated ones ...
	 */
	if (oe->buffer) {
		list_del_init(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del_init(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}
void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;
	/* Save 'data' too: reading oe->data after the memset would yield NULL. */
	void *old_data = oe->data;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver, old_data);
}