/* tools/perf/util/session.c */
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		/* return the saved errno: pr_err() may have clobbered errno */
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
static void perf_session__id_header_size(struct perf_session *session)
{
	struct sample_data *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}
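
/*
 * For reference, the optional trailer that sample_id_all appends to
 * non-sample events mirrors the checks above. A sketch of the layout,
 * assuming all five flags are set:
 *
 *	{ u32 pid, tid;  }	PERF_SAMPLE_TID
 *	{ u64 time;      }	PERF_SAMPLE_TIME
 *	{ u64 id;        }	PERF_SAMPLE_ID
 *	{ u64 stream_id; }	PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	PERF_SAMPLE_CPU
 */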
void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
	session->sample_id_all = value;
	perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
	session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
	self->sample_id_all = perf_header__sample_id_all(&self->header);
	perf_session__id_header_size(self);
}
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
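
/*
 * Usage sketch (hypothetical caller, not part of this file): report-style
 * tools typically drive a session like this, with error handling elided:
 *
 *	struct perf_session *session;
 *	int err;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &ops);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &ops);
 *	perf_session__delete(session);
 */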
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* advance before erasing: rb_erase() invalidates the node */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}
static int process_event_synth_stub(event_t *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(event_t *event __used,
			      struct sample_data *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
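
/*
 * A minimal sketch of how these defaults are meant to be used (hypothetical
 * tool code, not part of this file): a tool sets only the callbacks it cares
 * about and everything else falls back to the stubs above, e.g.:
 *
 *	static struct perf_event_ops ops = {
 *		.sample		 = process_sample_event,   // tool-specific
 *		.comm		 = event__process_comm,
 *		.ordered_samples = true,
 *	};
 */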
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}
static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type		= bswap_32(self->attr.attr.type);
	self->attr.attr.size		= bswap_32(self->attr.attr.size);
	self->attr.attr.config		= bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period	= bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type	= bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format	= bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events	= bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type		= bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr		= bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len		= bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = event__mmap_swap,
	[PERF_RECORD_COMM]		  = event__comm_swap,
	[PERF_RECORD_FORK]		  = event__task_swap,
	[PERF_RECORD_EXIT]		  = event__task_swap,
	[PERF_RECORD_LOST]		  = event__all64_swap,
	[PERF_RECORD_READ]		  = event__read_swap,
	[PERF_RECORD_SAMPLE]		  = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
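
/*
 * perf_session__process_event() indexes this table by header.type when
 * header.needs_swap is set, i.e. when the perf.data file was recorded on a
 * host of the opposite endianness. Entries left NULL simply get no
 * byte-swapping.
 */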
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	event_t			*event;
	struct list_head	list;
};
static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      event_t *event,
				      struct sample_data *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct sample_data sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		event__parse_sample(iter->event, s, &sample);
		perf_session_deliver_event(s, iter->event, &sample, ops,
					   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}
/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7  <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}
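
/*
 * Worked example for the insertion above (illustrative only): with queued
 * timestamps [10, 20, 30] and last_sample at 20, queueing 25 walks forward
 * once (stops at 30 > 25) and inserts before 30; queueing 15 walks backward
 * once and inserts after 10. Starting from last_sample rather than the list
 * head makes the nearly-sorted common case O(1).
 */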
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, event_t *event,
				    struct sample_data *data, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
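
/*
 * Note on the allocation above: entries come, in order of preference, from
 * the sample_cache free list (recycled by flush_sample_queue()), from the
 * current bump-allocated buffer, or from a freshly malloc'ed buffer of
 * MAX_SAMPLE_BUFFER entries. Slot 0 of each fresh buffer is never handed
 * out: its embedded list_head links the buffer on the to_free list, which
 * is why the index starts at 2 and the first entry returned is slot 1.
 */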
static void callchain__printf(struct sample_data *sample)
{
	unsigned int i;

	printf("... chain: nr:%Lu\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
}
static void perf_session__print_tstamp(struct perf_session *session,
				       event_t *event,
				       struct sample_data *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%Lu ", sample->time);
}
static void dump_event(struct perf_session *session, event_t *event,
		       u64 file_offset, struct sample_data *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size,
	       event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size,
	       event__get_event_name(event->header.type));
}
static void dump_sample(struct perf_session *session, event_t *event,
			struct sample_data *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
	       sample->pid, sample->tid, sample->ip, sample->period);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}
static int perf_session_deliver_event(struct perf_session *session,
				      event_t *event,
				      struct sample_data *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		return ops->sample(event, sample, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}
static int perf_session__preprocess_sample(struct perf_session *session,
					   event_t *event,
					   struct sample_data *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}

	return 0;
}
static int perf_session__process_user_event(struct perf_session *session,
					    event_t *event,
					    struct perf_event_ops *ops,
					    u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}
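
/*
 * Event flow, for orientation: perf_session__process_event() below first
 * byte-swaps the record if needed, dispatches synthesized user-space records
 * (attr, event_type, tracing data, build ids, finished rounds) immediately
 * via the function above, and for kernel records parses the sample data,
 * prechecks callchains, then either queues the event for time-ordered
 * delivery or delivers it right away.
 */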
static int perf_session__process_event(struct perf_session *session,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct sample_data sample;
	int ret;

	/* bounds-check the type before indexing the swap table */
	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	event__parse_sample(event, session, &sample);

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %Lu events and LOST %Lu!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}
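
/*
 * The mmap()-based reader below walks the file in windows of
 * session->mmap_window bytes (the whole file on 64-bit, 32MB on 32-bit,
 * see perf_session__new()). Up to 8 windows are kept mapped at once and
 * recycled round-robin, so events queued for time-ordered delivery can
 * still point into older mappings while newer data is being read.
 */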
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	event_t *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = (event_t *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size > mmap_size) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
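
/*
 * Usage sketch (hypothetical, mirroring how perf tools anchor kernel
 * relocation): look up a stable symbol such as "_text" in kallsyms and
 * register its address for every map type:
 *
 *	if (perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
 *						     "_text", addr) < 0)
 *		return -1;
 */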
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}