/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <sys/types.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <linux/list.h>

#include "thread_map.h"
#include <linux/hash.h>

#include "parse-options.h"

#include "intel-bts.h"
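
/*
 * Map the AUX area for the event mapped at 'fd'.  The aux_offset and
 * aux_size fields of the perf_event_mmap_page tell the kernel where the
 * AUX buffer lives, so they must be written before the mmap() call below.
 */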
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

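/*
 * Note: mp->mask is only non-zero for a power-of-2 length; a zero mask
 * makes __auxtrace_mmap__read() fall back to modulo arithmetic when
 * computing buffer offsets.
 */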
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

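/*
 * Grow the queue array by doubling its size until it covers 'new_nr_queues',
 * splicing each old queue's buffer list into the corresponding new queue.
 * The comparison against the old and requested sizes rejects overflow of
 * the doubling loop.
 */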
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data_file__fd(session->file);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

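/*
 * On 32-bit, buffers bigger than BUFFER_LIMIT_FOR_32_BIT cannot be mapped
 * in one piece, so chop an oversized buffer into limit-sized buffers that
 * are queued as 'consecutive' pieces of the original data.
 */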
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__add_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
					     struct perf_session *session,
					     unsigned int idx,
					     struct auxtrace_buffer *buffer)
{
	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data_file__is_pipe(session->file)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			return -ENOMEM;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		int err;

		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			return err;
	}

	return auxtrace_queues__add_buffer(queues, idx, buffer);
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *buffer;
	unsigned int idx;
	int err;

	buffer = zalloc(sizeof(struct auxtrace_buffer));
	if (!buffer)
		return -ENOMEM;

	buffer->pid = -1;
	buffer->tid = event->auxtrace.tid;
	buffer->cpu = event->auxtrace.cpu;
	buffer->data_offset = data_offset;
	buffer->offset = event->auxtrace.offset;
	buffer->reference = event->auxtrace.reference;
	buffer->size = event->auxtrace.size;
	idx = event->auxtrace.idx;

	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
	if (err)
		goto out_free;

	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz)
			return -EINVAL;
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}

	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

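/*
 * Sift the (queue_nr, ordinal) pair upwards from 'pos' in a min-heap
 * ordered by ordinal (typically a timestamp), shifting larger parents
 * down until the heap property holds.
 */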
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}

	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

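/*
 * Remove the smallest entry: move the hole at the root down towards the
 * smaller child, then sift the former last element back up into the hole.
 */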
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			break;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;

	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
{
	if (itr)
		return itr->info_priv_size(itr);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

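/*
 * The auxtrace index is kept as a list of blocks, each holding up to
 * PERF_AUXTRACE_INDEX_ENTRY_COUNT {file_offset, sz} entries.  On disk it
 * is a u64 entry count followed by the entries themselves (see
 * auxtrace_index__write() and auxtrace_index__process() below).
 */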
static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

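/*
 * A PERF_RECORD_AUXTRACE_INFO event is a fixed header followed by
 * 'priv_size' bytes of PMU-specific data supplied by the recording
 * itr's info_fill() callback.
 */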
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which is introduced after this cset,
 * when support in 'perf script' for these options is introduced.
 */

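/*
 * Illustrative option strings, derived from the cases parsed below (the
 * documentation referenced above is authoritative):
 *	"i100us"	synthesize instruction events every 100 microseconds
 *	"bex"		synthesize branch, error and transaction events
 *	"cg32"		synthesize call events with 32-entry call chains
 */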
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	size_t ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

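/*
 * Read the data added to the AUX ring buffer between mm->prev and the
 * current head.  A wrapped range is passed to fn() as two chunks
 * (data1/data2) together with a synthesized PERF_RECORD_AUXTRACE event;
 * fn() is expected to write the event, the data and the padding out.
 */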
static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

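/*
 * Illustrative use (a sketch, not code from this file): callers embed a
 * struct auxtrace_cache_entry, which supplies the 'hash' node and 'key',
 * at the start of their own entry type, e.g.:
 *
 *	struct my_entry {	(hypothetical example type)
 *		struct auxtrace_cache_entry entry;
 *		u64 payload;
 *	};
 *
 *	c = auxtrace_cache__new(bits, sizeof(struct my_entry), limit_percent);
 *	e = auxtrace_cache__alloc_entry(c);
 *	auxtrace_cache__add(c, key, &e->entry);
 *	e = auxtrace_cache__lookup(c, key);
 */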
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	unsigned int sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}