// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
static struct perf_pmu *perf_evsel__find_pmu(struct evsel *evsel)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->type == evsel->core.attr.type)
			break;
	}

	return pmu;
}
static bool perf_evsel__is_aux_event(struct evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);

	return pmu && pmu->auxtrace;
}
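
/*
 * Illustrative sketch (not part of the original file): an event counts as an
 * "AUX area" event when its PMU advertises auxtrace capability, as intel_pt,
 * intel_bts, cs_etm and arm_spe do.  A caller might use the helper like this:
 *
 *	evlist__for_each_entry(evlist, evsel) {
 *		if (perf_evsel__is_aux_event(evsel))
 *			pr_debug("%s produces AUX area data\n",
 *				 perf_evsel__name(evsel));
 *	}
 *
 * The loop itself is only an example and does not appear in this file.
 */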
/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int perf_evlist__regroup(struct evlist *evlist,
				struct evsel *leader,
				struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel->leader == leader ||
			      (evsel->leader == evsel &&
			       evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (evsel->leader != leader) {
				evsel->leader = leader;
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}
static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}
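
/*
 * Worked example (illustrative, not in the original source): with 4KiB pages,
 * auxtrace_pages = 128 gives mp->len = 512KiB; since that is a power of 2,
 * mp->mask = 0x7ffff and readers can use the cheap 'head & mask' form.  A
 * non-power-of-2 size such as 3 pages (12KiB) leaves mp->mask = 0, which
 * forces the 'head % len' path in __auxtrace_mmap__read() below.
 */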
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->core.cpus->map[idx];
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}
#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}
int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}
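
/*
 * Usage sketch (illustrative only): decoders typically embed a
 * struct auxtrace_queues, initialize it once, then feed it buffers taken
 * from PERF_RECORD_AUXTRACE events:
 *
 *	struct auxtrace_queues queues;
 *
 *	if (auxtrace_queues__init(&queues))
 *		return -ENOMEM;
 *	// ... auxtrace_queues__add_event() per PERF_RECORD_AUXTRACE ...
 *	auxtrace_queues__free(&queues);
 */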
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}
static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}
static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}
/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}
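
/*
 * Worked example (illustrative): on a 32-bit build, a 70MiB buffer is queued
 * as 32MiB + 32MiB + 6MiB pieces.  The first piece has ->consecutive == false
 * and the following pieces have ->consecutive == true, so the decoder knows
 * the data continues from the previous fragment without a gap.
 */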
static bool filter_cpu(struct perf_session *session, int cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = event->auxtrace.cpu,
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}
void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}
void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			break;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}
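
/*
 * Usage sketch (illustrative only): decoders keep one entry per queue in this
 * min-heap, keyed by the next timestamp in that queue, so queues can be
 * merged in time order:
 *
 *	err = auxtrace_heap__add(&heap, queue_nr, next_timestamp);
 *	...
 *	queue_nr = heap.heap_array[0].queue_nr;	// smallest ordinal on top
 *	auxtrace_heap__pop(&heap);	// then re-add with the queue's next timestamp
 */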
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}
static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}
void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}
int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}
u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}
/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (perf_evsel__is_group_leader(evsel)) {
			has_aux_leader = perf_evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			perf_evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			perf_evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct perf_evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel)) {
			has_aux_leader = perf_evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = perf_evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				perf_evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}
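
/*
 * Example command line (illustrative, based on the perf-record
 * documentation): AUX area sampling attaches snippets of e.g. Intel PT data
 * to the samples of other events in the same group:
 *
 *	perf record -e '{intel_pt//u,branch-misses:u}' --aux-sample
 *
 * An explicit size such as --aux-sample=8192 ends up in
 * evsel->core.attr.aux_sample_size via the loops above.
 */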
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}
static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}
void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}
static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}
int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}
static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}
int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}
static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}
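
/*
 * On-disk layout note (derived from the code above): the auxtrace index
 * section is a u64 entry count followed by that many pairs of
 * { u64 file_offset; u64 sz; }, each locating one PERF_RECORD_AUXTRACE
 * event in the perf.data file.  auxtrace_index__write() and this reader
 * must agree on that layout, including byte order, hence the bswap_64()
 * handling in auxtrace_index__process_entry().
 */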
static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}
struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = perf_evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENODEV;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}
struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}
void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}
int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_ARM_SPE:
		return arm_spe_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_CS_ETM:
		return cs_etm__process_auxtrace_info(event, session);
	case PERF_AUXTRACE_S390_CPUMSF:
		return s390_cpumsf_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}
perf_event__process_auxtrace(struct perf_session
*session
,
1243 union perf_event
*event
)
1248 fprintf(stdout
, " size: %#"PRI_lx64
" offset: %#"PRI_lx64
" ref: %#"PRI_lx64
" idx: %u tid: %d cpu: %d\n",
1249 event
->auxtrace
.size
, event
->auxtrace
.offset
,
1250 event
->auxtrace
.reference
, event
->auxtrace
.idx
,
1251 event
->auxtrace
.tid
, event
->auxtrace
.cpu
);
1253 if (auxtrace__dont_decode(session
))
1254 return event
->auxtrace
.size
;
1256 if (!session
->auxtrace
|| event
->header
.type
!= PERF_RECORD_AUXTRACE
)
1259 err
= session
->auxtrace
->process_auxtrace_event(session
, event
, session
->tool
);
1263 return event
->auxtrace
.size
;
#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->errors = true;
	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}
/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which is introduced after this cset,
 * when support in 'perf script' for these options is introduced.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}
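
/*
 * Worked example (illustrative): the option string "i100us" is parsed as
 * 'i' (synthesize instruction events), digits "100" (period = 100), then
 * 'u' which multiplies the period by 1000 and falls through to 'n', which
 * consumes the final 's'.  The result is period = 100000 with
 * PERF_ITRACE_PERIOD_NANOSECS, i.e. one instruction sample every 100us.
 */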
static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	size_t ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}
void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}
static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}
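
/*
 * Worked example for the non-power-of-2 path above (illustrative): with
 * mm->len = 12288 (three 4KiB pages), 'head' wraps at the highest multiple
 * of 12288 below 2^64.  rem = (0ULL - 12288) % 12288 == 4096, so subtracting
 * 'rem' realigns 'offset' to the same modulo-len position the kernel used
 * when it wrapped 'head'.
 */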
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}
/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}
static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}
void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	zfree(&c->hashtable);
	free(c);
}
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}
void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}
static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
						       u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;
	struct hlist_node *n;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry_safe(entry, n, hlist, hash) {
		if (entry->key == key) {
			hlist_del(&entry->hash);
			return entry;
		}
	}

	return NULL;
}
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);

	auxtrace_cache__free_entry(c, entry);
}
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}
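
/*
 * Usage sketch (illustrative only): callers embed struct auxtrace_cache_entry
 * at the start of their own entry type.  A typical lookup-or-create pattern,
 * where 'struct my_entry' is a hypothetical caller type:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 insn_cnt;
 *	};
 *
 *	struct my_entry *e = auxtrace_cache__lookup(c, key);
 *
 *	if (!e) {
 *		e = auxtrace_cache__alloc_entry(c);
 *		if (e) {
 *			e->insn_cnt = 0;
 *			auxtrace_cache__add(c, key, &e->entry);
 *		}
 *	}
 */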
static void addr_filter__free_str(struct addr_filter *filt)
{
	zfree(&filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
}
static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}
static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}
static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}
void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}
static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}
static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}
static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}
static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}
static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}
*filts
,
2014 struct addr_filter
*filt
;
2015 const char *fstr
= filter
;
2019 filt
= addr_filter__new();
2020 err
= parse_one_filter(filt
, &fstr
);
2022 addr_filter__free(filt
);
2023 addr_filters__exit(filts
);
2026 addr_filters__add(filts
, filt
);
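
/*
 * Example (illustrative): the bare filter string
 *
 *	"filter main @ /bin/ls, start sys_write"
 *
 * parses into two struct addr_filter entries: a range filter on symbol
 * 'main' in /bin/ls, and a start filter on kernel symbol 'sys_write'.
 * Symbols are only resolved to addresses later, in
 * addr_filter__resolve_syms().
 */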
struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}
static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}
static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}
static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}
static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}
static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}
static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}
static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}
static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}
static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}
static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL  ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}
static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}
static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}
static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}
static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}
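
/*
 * Example output (illustrative): a resolved range filter is re-serialized
 * into the address form of "<action> <start addr>/<size>[@<filename>]",
 * for instance something like
 *
 *	"filter 0x404570/0x23@/bin/ls"
 *
 * which is what perf_evsel__append_addr_filter() then passes on as the
 * event's filter string.
 */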
static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (perf_evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}
static int perf_evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}
int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = perf_evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}
void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free_events(session);
}
void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free(session);
}