/*
 * intel-bts.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <endian.h>
#include <errno.h>
#include <byteswap.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>

#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "session.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "tsc.h"
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_BTS_ERR_NOINSN	5
#define INTEL_BTS_ERR_LOST	9

#if __BYTE_ORDER == __BIG_ENDIAN
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif

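/* Per-session Intel BTS decoder state. */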
struct intel_bts {
	struct auxtrace			auxtrace;
	struct auxtrace_queues		queues;
	struct auxtrace_heap		heap;
	u32				auxtrace_type;
	struct perf_session		*session;
	struct machine			*machine;
	bool				sampling_mode;
	bool				snapshot_mode;
	bool				data_queued;
	u32				pmu_type;
	struct perf_tsc_conversion	tc;
	bool				cap_user_time_zero;
	struct itrace_synth_opts	synth_opts;
	bool				sample_branches;
	u32				branches_filter;
	u64				branches_sample_type;
	u64				branches_id;
	size_t				branches_event_size;
	unsigned long			num_events;
};

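/* Decode state for a single auxtrace queue. */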
struct intel_bts_queue {
	struct intel_bts	*bts;
	unsigned int		queue_nr;
	struct auxtrace_buffer	*buffer;
	bool			on_heap;
	bool			done;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	u64			time;
	struct intel_pt_insn	intel_pt_insn;
	u32			sample_flags;
};

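/*
 * Layout of one hardware BTS record: branch source, branch target and a
 * flags word (the dump below treats bit 4 as the "predicted" flag).
 */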
struct branch {
	u64 from;
	u64 to;
	u64 misc;
};

static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
			   unsigned char *buf, size_t len)
{
	struct branch *branch;
	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel BTS data: size %zu bytes\n",
		      len);

	while (len) {
		if (len >= br_sz)
			sz = br_sz;
		else
			sz = len;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < sz; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < br_sz; i++)
			color_fprintf(stdout, color, "   ");
		if (len >= br_sz) {
			branch = (struct branch *)buf;
			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
				      le64_to_cpu(branch->from),
				      le64_to_cpu(branch->to),
				      le64_to_cpu(branch->misc) & 0x10 ?
				      "pred" : "miss");
		} else {
			color_fprintf(stdout, color, " Bad record!\n");
		}
		pos += sz;
		buf += sz;
		len -= sz;
	}
}

static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
				 size_t len)
{
	printf(".\n");
	intel_bts_dump(bts, buf, len);
}

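/* Report lost trace data by synthesizing an itrace error event. */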
static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
			     sample->tid, 0, "Lost trace data");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
						     unsigned int queue_nr)
{
	struct intel_bts_queue *btsq;

	btsq = zalloc(sizeof(struct intel_bts_queue));
	if (!btsq)
		return NULL;

	btsq->bts = bts;
	btsq->queue_nr = queue_nr;
	btsq->pid = -1;
	btsq->tid = -1;
	btsq->cpu = -1;

	return btsq;
}

static int intel_bts_setup_queue(struct intel_bts *bts,
				 struct auxtrace_queue *queue,
				 unsigned int queue_nr)
{
	struct intel_bts_queue *btsq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!btsq) {
		btsq = intel_bts_alloc_queue(bts, queue_nr);
		if (!btsq)
			return -ENOMEM;
		queue->priv = btsq;

		if (queue->cpu != -1)
			btsq->cpu = queue->cpu;
		btsq->tid = queue->tid;
	}

	if (bts->sampling_mode)
		return 0;

	if (!btsq->on_heap && !btsq->buffer) {
		int ret;

		btsq->buffer = auxtrace_buffer__next(queue, NULL);
		if (!btsq->buffer)
			return 0;

		ret = auxtrace_heap__add(&bts->heap, queue_nr,
					 btsq->buffer->reference);
		if (ret)
			return ret;
		btsq->on_heap = true;
	}

	return 0;
}

static int intel_bts_setup_queues(struct intel_bts *bts)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bts->queues.nr_queues; i++) {
		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
					    i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline int intel_bts_update_queues(struct intel_bts *bts)
{
	if (bts->queues.new_data) {
		bts->queues.new_data = false;
		return intel_bts_setup_queues(bts);
	}
	return 0;
}

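/*
 * In snapshot mode consecutive buffers may overlap. Find where buffer 'b'
 * stops repeating the tail of buffer 'a' so the duplicate records can be
 * skipped.
 */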
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
					     unsigned char *buf_b, size_t len_b)
{
	size_t offs, len;

	if (len_a > len_b)
		offs = len_a - len_b;
	else
		offs = 0;

	for (; offs < len_a; offs += sizeof(struct branch)) {
		len = len_a - offs;
		if (!memcmp(buf_a + offs, buf_b, len))
			return buf_b + len;
	}

	return buf_b;
}

static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
				    struct auxtrace_buffer *b)
{
	struct auxtrace_buffer *a;
	void *start;

	if (b->list.prev == &queue->head)
		return 0;
	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}

static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
{
	return machine__kernel_ip(bts->machine, ip) ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
					 struct branch *branch)
{
	int ret;
	struct intel_bts *bts = btsq->bts;
	union perf_event event;
	struct perf_sample sample = { .ip = 0, };

	if (bts->synth_opts.initial_skip &&
	    bts->num_events++ <= bts->synth_opts.initial_skip)
		return 0;

	sample.ip = le64_to_cpu(branch->from);
	sample.cpumode = intel_bts_cpumode(bts, sample.ip);
	sample.pid = btsq->pid;
	sample.tid = btsq->tid;
	sample.addr = le64_to_cpu(branch->to);
	sample.id = btsq->bts->branches_id;
	sample.stream_id = btsq->bts->branches_id;
	sample.period = 1;
	sample.cpu = btsq->cpu;
	sample.flags = btsq->sample_flags;
	sample.insn_len = btsq->intel_pt_insn.length;
	memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);

	event.sample.header.type = PERF_RECORD_SAMPLE;
	event.sample.header.misc = sample.cpumode;
	event.sample.header.size = sizeof(struct perf_event_header);

	if (bts->synth_opts.inject) {
		event.sample.header.size = bts->branches_event_size;
		ret = perf_event__synthesize_sample(&event,
						    bts->branches_sample_type,
						    0, &sample);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
	if (ret)
		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}

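/*
 * Read and decode the instruction at the branch source address, filling in
 * btsq->intel_pt_insn so its type and length can be attached to the sample.
 */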
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
	struct machine *machine = btsq->bts->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	uint8_t cpumode;
	int err = -1;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = machine__find_thread(machine, -1, btsq->tid);
	if (!thread)
		return -1;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		goto out_put;

	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
				  INTEL_PT_INSN_BUF_SZ);
	if (len <= 0)
		goto out_put;

	/* Load maps to ensure dso->is_64_bit has been updated */
	map__load(al.map);

	x86_64 = al.map->dso->is_64_bit;

	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
		goto out_put;

	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
				 pid_t tid, u64 ip)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
			     "Failed to get instruction");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

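/*
 * Derive sample flags for one branch record: a zero 'from' address marks a
 * trace begin, a zero 'to' address marks a trace end, otherwise classify the
 * branch from the decoded source instruction.
 */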
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
				     struct branch *branch)
{
	int err;

	if (!branch->from) {
		if (branch->to)
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_TRACE_BEGIN;
		else
			btsq->sample_flags = 0;
		btsq->intel_pt_insn.length = 0;
	} else if (!branch->to) {
		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		btsq->intel_pt_insn.length = 0;
	} else {
		err = intel_bts_get_next_insn(btsq, branch->from);
		if (err) {
			btsq->sample_flags = 0;
			btsq->intel_pt_insn.length = 0;
			if (!btsq->bts->synth_opts.errors)
				return 0;
			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
						    btsq->pid, btsq->tid,
						    branch->from);
			return err;
		}
		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
		/* Check for an async branch into the kernel */
		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_CALL |
					   PERF_IP_FLAG_SYSCALLRET))
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_CALL |
					     PERF_IP_FLAG_ASYNC |
					     PERF_IP_FLAG_INTERRUPT;
	}

	return 0;
}

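/*
 * Walk the branch records in one AUX buffer, updating the thread stack and
 * synthesizing branch samples subject to the branches filter.
 */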
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
				    struct auxtrace_buffer *buffer,
				    struct thread *thread)
{
	struct branch *branch;
	size_t sz, bsz = sizeof(struct branch);
	u32 filter = btsq->bts->branches_filter;
	int err = 0;

	if (buffer->use_data) {
		sz = buffer->use_size;
		branch = buffer->use_data;
	} else {
		sz = buffer->size;
		branch = buffer->data;
	}

	if (!btsq->bts->sample_branches)
		return 0;

	for (; sz > bsz; branch += 1, sz -= bsz) {
		if (!branch->from && !branch->to)
			continue;
		intel_bts_get_branch_type(btsq, branch);
		if (btsq->bts->synth_opts.thread_stack)
			thread_stack__event(thread, btsq->sample_flags,
					    le64_to_cpu(branch->from),
					    le64_to_cpu(branch->to),
					    btsq->intel_pt_insn.length,
					    buffer->buffer_nr + 1);
		if (filter && !(filter & btsq->sample_flags))
			continue;
		err = intel_bts_synth_branch_sample(btsq, branch);
		if (err)
			break;
	}
	return err;
}

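/*
 * Process the next buffer of one queue. Returns a negative error code on
 * failure, 1 when the queue has no more data, otherwise 0.
 */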
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;
	struct thread *thread;
	int err;

	if (btsq->done)
		return 1;

	if (btsq->pid == -1) {
		thread = machine__find_thread(btsq->bts->machine, -1,
					      btsq->tid);
		if (thread)
			btsq->pid = thread->pid_;
	} else {
		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
						 btsq->tid);
	}

	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];

	if (!buffer)
		buffer = auxtrace_buffer__next(queue, NULL);

	if (!buffer) {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
		err = 1;
		goto out_put;
	}

	/* Currently there is no support for split buffers */
	if (buffer->consecutive) {
		err = -EINVAL;
		goto out_put;
	}

	if (!buffer->data) {
		int fd = perf_data__fd(btsq->bts->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data) {
			err = -ENOMEM;
			goto out_put;
		}
	}

	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
	    intel_bts_do_fix_overlap(queue, buffer)) {
		err = -ENOMEM;
		goto out_put;
	}

	if (!btsq->bts->synth_opts.callchain &&
	    !btsq->bts->synth_opts.thread_stack && thread &&
	    (!old_buffer || btsq->bts->sampling_mode ||
	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);

	err = intel_bts_process_buffer(btsq, buffer, thread);

	auxtrace_buffer__drop_data(buffer);

	btsq->buffer = auxtrace_buffer__next(queue, buffer);
	if (btsq->buffer) {
		if (timestamp)
			*timestamp = btsq->buffer->reference;
	} else {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
	}
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
	u64 ts = 0;
	int ret;

	while (1) {
		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0)
			return ret;
		if (ret)
			break;
	}
	return 0;
}

static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
		struct intel_bts_queue *btsq = queue->priv;

		if (btsq && btsq->tid == tid)
			return intel_bts_flush_queue(btsq);
	}
	return 0;
}

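/* Process queued data in timestamp order, up to the given timestamp. */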
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
	while (1) {
		unsigned int queue_nr;
		struct auxtrace_queue *queue;
		struct intel_bts_queue *btsq;
		u64 ts = 0;
		int ret;

		if (!bts->heap.heap_cnt)
			return 0;

		if (bts->heap.heap_array[0].ordinal > timestamp)
			return 0;

		queue_nr = bts->heap.heap_array[0].queue_nr;
		queue = &bts->queues.queue_array[queue_nr];
		btsq = queue->priv;

		auxtrace_heap__pop(&bts->heap);

		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&bts->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			btsq->on_heap = false;
		}
	}

	return 0;
}

static int intel_bts_process_event(struct perf_session *session,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	u64 timestamp;
	int err;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel BTS requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
	else
		timestamp = 0;

	err = intel_bts_update_queues(bts);
	if (err)
		return err;

	err = intel_bts_process_queues(bts, timestamp);
	if (err)
		return err;
	if (event->header.type == PERF_RECORD_EXIT) {
		err = intel_bts_process_tid_exit(bts, event->fork.tid);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    bts->synth_opts.errors)
		err = intel_bts_lost(bts, sample);

	return err;
}

static int intel_bts_process_auxtrace_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	if (bts->sampling_mode)
		return 0;

	if (!bts->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&bts->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_bts_dump_event(bts, buffer->data,
						     buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

static int intel_bts_flush(struct perf_session *session,
			   struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	int ret;

	if (dump_trace || bts->sampling_mode)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_bts_update_queues(bts);
	if (ret < 0)
		return ret;

	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}

static void intel_bts_free_queue(void *priv)
{
	struct intel_bts_queue *btsq = priv;

	if (!btsq)
		return;
	free(btsq);
}

static void intel_bts_free_events(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_bts_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}

static void intel_bts_free(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	auxtrace_heap__free(&bts->heap);
	intel_bts_free_events(session);
	session->auxtrace = NULL;
	free(bts);
}

struct intel_bts_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_bts_event_synth(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct intel_bts_synth *intel_bts_synth =
			container_of(tool, struct intel_bts_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_bts_synth->session,
						 event, NULL);
}

static int intel_bts_synth_event(struct perf_session *session,
				 struct perf_event_attr *attr, u64 id)
{
	struct intel_bts_synth intel_bts_synth;

	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
	intel_bts_synth.session = session;

	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
					   &id, intel_bts_event_synth);
}

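/*
 * Synthesize the attribute for the "branches" event that will carry the
 * decoded BTS branches, based on the attribute of the original BTS event.
 */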
static int intel_bts_synth_events(struct intel_bts *bts,
				  struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel BTS data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (bts->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_bts_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		bts->sample_branches = true;
		bts->branches_sample_type = attr.sample_type;
		bts->branches_id = id;
		/*
		 * We only use sample types from PERF_SAMPLE_MASK so we can use
		 * __perf_evsel__sample_size() here.
		 */
		bts->branches_event_size = sizeof(struct sample_event) +
					   __perf_evsel__sample_size(attr.sample_type);
	}

	return 0;
}

static const char * const intel_bts_info_fmts[] = {
	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
};

static void intel_bts_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}

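/*
 * Handle the PERF_RECORD_AUXTRACE_INFO event: allocate and initialize the
 * decoder and hook it into the session's auxtrace callbacks.
 */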
int intel_bts_process_auxtrace_info(union perf_event *event,
				    struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
	struct intel_bts *bts;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	bts = zalloc(sizeof(struct intel_bts));
	if (!bts)
		return -ENOMEM;

	err = auxtrace_queues__init(&bts->queues);
	if (err)
		goto err_free;

	bts->session = session;
	bts->machine = &session->machines.host; /* No kvm support */
	bts->auxtrace_type = auxtrace_info->type;
	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
	bts->cap_user_time_zero =
			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];

	bts->sampling_mode = false;

	bts->auxtrace.process_event = intel_bts_process_event;
	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
	bts->auxtrace.flush_events = intel_bts_flush;
	bts->auxtrace.free_events = intel_bts_free_events;
	bts->auxtrace.free = intel_bts_free;
	session->auxtrace = &bts->auxtrace;

	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
			     INTEL_BTS_SNAPSHOT_MODE);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		bts->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&bts->synth_opts);
		if (session->itrace_synth_opts)
			bts->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (bts->synth_opts.calls)
		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_TRACE_END;
	if (bts->synth_opts.returns)
		bts->branches_filter |= PERF_IP_FLAG_RETURN |
					PERF_IP_FLAG_TRACE_BEGIN;

	err = intel_bts_synth_events(bts, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&bts->queues, session);
	if (err)
		goto err_free_queues;

	if (bts->queues.populated)
		bts->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&bts->queues);
	session->auxtrace = NULL;
err_free:
	free(bts);
	return err;
}