#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
/*
 * Debug printing helpers: route messages through eprintf() gated on the
 * 'debug_ordered_events' verbosity level.  pr_N() takes an explicit
 * level 'n'; pr() is the common level-1 shorthand.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
14 static void queue_event(struct ordered_events
*oe
, struct ordered_event
*new)
16 struct ordered_event
*last
= oe
->last
;
17 u64 timestamp
= new->timestamp
;
23 pr_oe_time2(timestamp
, "queue_event nr_events %u\n", oe
->nr_events
);
26 list_add(&new->list
, &oe
->events
);
27 oe
->max_timestamp
= timestamp
;
32 * last event might point to some random place in the list as it's
33 * the last queued event. We expect that the new event is close to
36 if (last
->timestamp
<= timestamp
) {
37 while (last
->timestamp
<= timestamp
) {
39 if (p
== &oe
->events
) {
40 list_add_tail(&new->list
, &oe
->events
);
41 oe
->max_timestamp
= timestamp
;
44 last
= list_entry(p
, struct ordered_event
, list
);
46 list_add_tail(&new->list
, &last
->list
);
48 while (last
->timestamp
> timestamp
) {
50 if (p
== &oe
->events
) {
51 list_add(&new->list
, &oe
->events
);
54 last
= list_entry(p
, struct ordered_event
, list
);
56 list_add(&new->list
, &last
->list
);
60 static union perf_event
*__dup_event(struct ordered_events
*oe
,
61 union perf_event
*event
)
63 union perf_event
*new_event
= NULL
;
65 if (oe
->cur_alloc_size
< oe
->max_alloc_size
) {
66 new_event
= memdup(event
, event
->header
.size
);
68 oe
->cur_alloc_size
+= event
->header
.size
;
74 static union perf_event
*dup_event(struct ordered_events
*oe
,
75 union perf_event
*event
)
77 return oe
->copy_on_queue
? __dup_event(oe
, event
) : event
;
80 static void free_dup_event(struct ordered_events
*oe
, union perf_event
*event
)
82 if (oe
->copy_on_queue
) {
83 oe
->cur_alloc_size
-= event
->header
.size
;
88 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
89 static struct ordered_event
*alloc_event(struct ordered_events
*oe
,
90 union perf_event
*event
)
92 struct list_head
*cache
= &oe
->cache
;
93 struct ordered_event
*new = NULL
;
94 union perf_event
*new_event
;
96 new_event
= dup_event(oe
, event
);
100 if (!list_empty(cache
)) {
101 new = list_entry(cache
->next
, struct ordered_event
, list
);
102 list_del(&new->list
);
103 } else if (oe
->buffer
) {
104 new = oe
->buffer
+ oe
->buffer_idx
;
105 if (++oe
->buffer_idx
== MAX_SAMPLE_BUFFER
)
107 } else if (oe
->cur_alloc_size
< oe
->max_alloc_size
) {
108 size_t size
= MAX_SAMPLE_BUFFER
* sizeof(*new);
110 oe
->buffer
= malloc(size
);
112 free_dup_event(oe
, new_event
);
116 pr("alloc size %" PRIu64
"B (+%zu), max %" PRIu64
"B\n",
117 oe
->cur_alloc_size
, size
, oe
->max_alloc_size
);
119 oe
->cur_alloc_size
+= size
;
120 list_add(&oe
->buffer
->list
, &oe
->to_free
);
122 /* First entry is abused to maintain the to_free list. */
124 new = oe
->buffer
+ 1;
126 pr("allocation limit reached %" PRIu64
"B\n", oe
->max_alloc_size
);
129 new->event
= new_event
;
133 static struct ordered_event
*
134 ordered_events__new_event(struct ordered_events
*oe
, u64 timestamp
,
135 union perf_event
*event
)
137 struct ordered_event
*new;
139 new = alloc_event(oe
, event
);
141 new->timestamp
= timestamp
;
142 queue_event(oe
, new);
148 void ordered_events__delete(struct ordered_events
*oe
, struct ordered_event
*event
)
150 list_move(&event
->list
, &oe
->cache
);
152 free_dup_event(oe
, event
->event
);
155 int ordered_events__queue(struct ordered_events
*oe
, union perf_event
*event
,
156 struct perf_sample
*sample
, u64 file_offset
)
158 u64 timestamp
= sample
->time
;
159 struct ordered_event
*oevent
;
161 if (!timestamp
|| timestamp
== ~0ULL)
164 if (timestamp
< oe
->last_flush
) {
165 pr_oe_time(timestamp
, "out of order event\n");
166 pr_oe_time(oe
->last_flush
, "last flush, last_flush_type %d\n",
167 oe
->last_flush_type
);
169 oe
->nr_unordered_events
++;
172 oevent
= ordered_events__new_event(oe
, timestamp
, event
);
174 ordered_events__flush(oe
, OE_FLUSH__HALF
);
175 oevent
= ordered_events__new_event(oe
, timestamp
, event
);
181 oevent
->file_offset
= file_offset
;
185 static int __ordered_events__flush(struct ordered_events
*oe
)
187 struct list_head
*head
= &oe
->events
;
188 struct ordered_event
*tmp
, *iter
;
189 u64 limit
= oe
->next_flush
;
190 u64 last_ts
= oe
->last
? oe
->last
->timestamp
: 0ULL;
191 bool show_progress
= limit
== ULLONG_MAX
;
192 struct ui_progress prog
;
199 ui_progress__init(&prog
, oe
->nr_events
, "Processing time ordered events...");
201 list_for_each_entry_safe(iter
, tmp
, head
, list
) {
205 if (iter
->timestamp
> limit
)
207 ret
= oe
->deliver(oe
, iter
);
211 ordered_events__delete(oe
, iter
);
212 oe
->last_flush
= iter
->timestamp
;
215 ui_progress__update(&prog
, 1);
218 if (list_empty(head
))
220 else if (last_ts
<= limit
)
221 oe
->last
= list_entry(head
->prev
, struct ordered_event
, list
);
224 ui_progress__finish();
229 int ordered_events__flush(struct ordered_events
*oe
, enum oe_flush how
)
231 static const char * const str
[] = {
239 if (oe
->nr_events
== 0)
243 case OE_FLUSH__FINAL
:
244 oe
->next_flush
= ULLONG_MAX
;
249 struct ordered_event
*first
, *last
;
250 struct list_head
*head
= &oe
->events
;
252 first
= list_entry(head
->next
, struct ordered_event
, list
);
255 /* Warn if we are called before any event got allocated. */
256 if (WARN_ONCE(!last
|| list_empty(head
), "empty queue"))
259 oe
->next_flush
= first
->timestamp
;
260 oe
->next_flush
+= (last
->timestamp
- first
->timestamp
) / 2;
264 case OE_FLUSH__ROUND
:
270 pr_oe_time(oe
->next_flush
, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
271 str
[how
], oe
->nr_events
);
272 pr_oe_time(oe
->max_timestamp
, "max_timestamp\n");
274 err
= __ordered_events__flush(oe
);
277 if (how
== OE_FLUSH__ROUND
)
278 oe
->next_flush
= oe
->max_timestamp
;
280 oe
->last_flush_type
= how
;
283 pr_oe_time(oe
->next_flush
, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
284 str
[how
], oe
->nr_events
);
285 pr_oe_time(oe
->last_flush
, "last_flush\n");
290 void ordered_events__init(struct ordered_events
*oe
, ordered_events__deliver_t deliver
)
292 INIT_LIST_HEAD(&oe
->events
);
293 INIT_LIST_HEAD(&oe
->cache
);
294 INIT_LIST_HEAD(&oe
->to_free
);
295 oe
->max_alloc_size
= (u64
) -1;
296 oe
->cur_alloc_size
= 0;
297 oe
->deliver
= deliver
;
300 void ordered_events__free(struct ordered_events
*oe
)
302 while (!list_empty(&oe
->to_free
)) {
303 struct ordered_event
*event
;
305 event
= list_entry(oe
->to_free
.next
, struct ordered_event
, list
);
306 list_del(&event
->list
);
307 free_dup_event(oe
, event
->event
);