/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <sys/param.h>
#include <linux/list.h>
#include "thread_map.h"
#include <linux/hash.h>
#include <subcmd/parse-options.h>
#include "intel-bts.h"
#include "s390-cpumsf.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

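/*
 * A minimal sketch of how the two mmap helpers above combine on the
 * record side, assuming 'userpg' points at an already-mapped
 * perf_event_mmap_page and 'fd' is the event file descriptor (the
 * surrounding names are illustrative):
 *
 *	struct auxtrace_mmap_params mp;
 *	struct auxtrace_mmap mm = { .base = NULL };
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   false);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *		return -1;
 *
 * Writing aux_offset/aux_size into the mmap page before the second
 * mmap() is what asks the kernel to allocate the AUX area.
 */
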
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	free(queues->queue_array);
	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

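/*
 * For example, on a 32-bit build a 100MiB buffer is queued as three
 * 32MiB chunks followed by the remaining 4MiB, with 'consecutive' set on
 * every chunk after the first so decoders know the trace data continues
 * without a gap.
 */
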
static bool filter_cpu(struct perf_session *session, int cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.tid = event->auxtrace.tid,
		.cpu = event->auxtrace.cpu,
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

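/*
 * A session tool's PERF_RECORD_AUXTRACE handler typically forwards
 * straight to this function. A hedged sketch (struct example_pt and its
 * layout are illustrative, not a real decoder):
 *
 *	static int example_process_auxtrace_event(struct perf_session *session,
 *						  union perf_event *event,
 *						  struct perf_tool *tool __maybe_unused)
 *	{
 *		struct example_pt *pt = container_of(session->auxtrace,
 *						     struct example_pt,
 *						     auxtrace);
 *		off_t data_offset;
 *
 *		if (perf_data__is_pipe(session->data))
 *			data_offset = 0;
 *		else
 *			data_offset = lseek(perf_data__fd(session->data), 0,
 *					    SEEK_CUR);
 *
 *		return auxtrace_queues__add_event(&pt->queues, session, event,
 *						  data_offset, NULL);
 *	}
 */
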
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			pos = left;
			break;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap->heap_cnt;

	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

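/*
 * The heap keeps the queue with the smallest 'ordinal' (typically a
 * timestamp) at heap_array[0], letting a decoder merge all queues in
 * time order. A minimal sketch, where queue_next_timestamp() and
 * decode_one_buffer() are hypothetical decoder helpers:
 *
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		if (decode_one_buffer(queue_nr) == 0)
 *			auxtrace_heap__add(&heap, queue_nr,
 *					   queue_next_timestamp(queue_nr));
 *	}
 */
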
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct perf_evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -EINVAL;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

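/*
 * Taken together, the queue helpers give decoders a common setup path; a
 * sketch of the usual flow (the glue around the real calls is
 * illustrative):
 *
 *	auxtrace_queues__init(&queues);
 *	if (perf_data__is_pipe(session->data))
 *		// events arrive in-line; queue each PERF_RECORD_AUXTRACE:
 *		auxtrace_queues__add_event(&queues, session, event,
 *					   data_offset, NULL);
 *	else
 *		// the file index locates every buffer up front:
 *		auxtrace_queues__process_index(&queues, session);
 */
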
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

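/*
 * Note the synthesized error event is variable-sized: header.size covers
 * only the used part of 'msg' (including its NUL terminator), rounded up
 * to a u64 boundary by PERF_ALIGN(), so short messages do not pay for
 * the full MAX_AUXTRACE_ERROR_MSG buffer in the perf.data file.
 */
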
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_ARM_SPE:
		return arm_spe_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_CS_ETM:
		return cs_etm__process_auxtrace_info(event, session);
	case PERF_AUXTRACE_S390_CPUMSF:
		return s390_cpumsf_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

perf_event__process_auxtrace(struct perf_session
*session
,
935 union perf_event
*event
)
940 fprintf(stdout
, " size: %#"PRIx64
" offset: %#"PRIx64
" ref: %#"PRIx64
" idx: %u tid: %d cpu: %d\n",
941 event
->auxtrace
.size
, event
->auxtrace
.offset
,
942 event
->auxtrace
.reference
, event
->auxtrace
.idx
,
943 event
->auxtrace
.tid
, event
->auxtrace
.cpu
);
945 if (auxtrace__dont_decode(session
))
946 return event
->auxtrace
.size
;
948 if (!session
->auxtrace
|| event
->header
.type
!= PERF_RECORD_AUXTRACE
)
951 err
= session
->auxtrace
->process_auxtrace_event(session
, event
, session
->tool
);
955 return event
->auxtrace
.size
;
#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

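/*
 * These defaults mirror the --itrace option string parsed below: the
 * default set is roughly equivalent to --itrace=ibxwpe with an
 * instruction period of 100000 nanoseconds (i.e. "i100000ns"). See
 * tools/perf/Documentation/perf-script.txt for the full option list.
 */
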
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->errors = true;
	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which is introduced after this cset,
 * when support in 'perf script' for these options is introduced.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts, false);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	size_t ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

static int __auxtrace_mmap__read(struct perf_mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

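/*
 * Worked example of the non-power-of-2 wrap handling above: for
 * mm->len = 24576 (24KiB), rem = (0ULL - 24576) % 24576 = 2^64 mod 24576
 * = 16384, i.e. 'head' wraps 16KiB short of 2^64, and that remainder
 * must be subtracted for 'offset' to stay consistent with 'head_off'.
 */
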
int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}

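/*
 * On the record side these are driven from the mmap read loop. A minimal
 * sketch, assuming 'evlist' mmaps are set up; record__process_auxtrace()
 * (in builtin-record.c) is the usual 'fn' callback:
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		struct perf_mmap *map = &evlist->mmap[i];
 *
 *		if (map->auxtrace_mmap.base) {
 *			int ret = auxtrace_mmap__read(map, itr, tool,
 *						      record__process_auxtrace);
 *			if (ret < 0)
 *				return ret;
 *		}
 *	}
 */
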
/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

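/*
 * A minimal usage sketch, assuming a caller-defined entry type that
 * embeds struct auxtrace_cache_entry as its first member (the pattern
 * the Intel PT decoder uses); 'struct my_entry' and 'key' are
 * illustrative:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry	entry;
 *		u64				insn_cnt;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_entry), 200);
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->insn_cnt = 1;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 *	e = auxtrace_cache__lookup(c, key);
 */
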
static void addr_filter__free_str(struct addr_filter *filt)
{
	free(filt->str);
	filt->action   = NULL;
	filt->sym_from = NULL;
	filt->sym_to   = NULL;
	filt->filename = NULL;
	filt->str      = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

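/*
 * Example bare filter strings accepted here (resolution to addresses
 * happens later in addr_filter__resolve_syms()):
 *
 *	"filter main @ /bin/ls"      trace only main() in /bin/ls
 *	"filter 0x1000 / 0x100"      trace 0x100 bytes from address 0x1000
 *	"start func1, stop func2"    two filters separated by a comma
 */
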
struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL  ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	struct symbol *first_sym = dso__first_symbol(dso);
	struct symbol *last_sym = dso__last_symbol(dso);

	if (!first_sym || !last_sym) {
		pr_err("Failed to determine filter for %s\nNo symbols found.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = first_sym->start;
	filt->size = last_sym->end - first_sym->start;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct perf_evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (perf_evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static struct perf_pmu *perf_evsel__find_pmu(struct perf_evsel *evsel)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->type == evsel->attr.type)
			break;
	}

	return pmu;
}

static int perf_evsel__nr_addr_filter(struct perf_evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = perf_evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}