// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
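
/*
 * Insert @new into the time-sorted 'events' list. The search starts at
 * the previously queued event (oe->last) and walks forward or backward
 * from there, since consecutive events usually carry nearby timestamps.
 */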
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}
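
/* Number of ordered_event slots per allocation buffer, sized so the event array takes 64KiB. */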
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered events objects:
	 *   buffer -> event[0]
	 *             event[1]
	 *             ...
	 *
	 * Each allocated ordered event is linked to one of
	 * the following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of ordered event object moves it from events to
	 * the cache list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		return NULL;
	}

	new->event = new_event;
	return new;
}
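
/*
 * Allocate an ordered_event wrapper for @event and insert it into the
 * time-sorted queue. Returns NULL when the allocation limit is hit.
 */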
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}
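
/*
 * Queue one event. If the allocation limit is reached, flush half of
 * the queued events to free up space and retry once.
 */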
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}
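
/*
 * Deliver every queued event with a timestamp at or below
 * oe->next_flush, moving delivered events onto the cache list.
 */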
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}
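
/*
 * Flush queued events according to @how: FINAL and TOP flush
 * everything, HALF flushes up to the midpoint between the first and
 * last queued timestamps, TIME flushes up to @timestamp, and ROUND
 * flushes up to the max_timestamp recorded at the end of the previous
 * round.
 */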
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP  ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}
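
/* Return the timestamp of the oldest queued event, or 0 if the queue is empty. */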
u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}
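
/*
 * Typical usage, sketched here for reference (the deliver callback name
 * and the event/sample_time/offset variables are illustrative, not part
 * of this file):
 *
 *	static int deliver(struct ordered_events *oe, struct ordered_event *ev)
 *	{
 *		// process ev->event; events arrive sorted by ev->timestamp
 *		return 0;
 *	}
 *
 *	struct ordered_events oe;
 *
 *	ordered_events__init(&oe, deliver, NULL);
 *	ordered_events__queue(&oe, event, sample_time, offset);
 *	...
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */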
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
	oe->data	   = data;
}

static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}
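
/*
 * Free all allocation buffers. The current buffer may be only partially
 * used, so only its first oe->buffer_idx slots hold duplicated events.
 */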
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * Current buffer might not have all the events allocated
	 * yet, we need to free only allocated ones ...
	 */
	if (oe->buffer) {
		list_del(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}

void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;
	/* Save the data pointer too: after memset() oe->data would be NULL. */
	void *old_data = oe->data;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver, old_data);
}