// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#include "sane_ctype.h"

#define BUFSZ	1024
#define READLEN	128

struct state {
	u64 done[1024];
	size_t done_cnt;
};
static unsigned int hex(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return c - 'A' + 10;
}
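/*
 * Parse one objdump "chunk" - a whitespace-terminated run of hex byte
 * pairs - from *line into *buf, advancing the line pointer, the output
 * pointer and the remaining buffer length.  Multi-byte chunks are
 * byte-swapped on little-endian hosts to undo objdump's display order.
 */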
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	size_t bytes_read = 0;
	unsigned char *chunk_start = *buf;
	const char *p = *line;

	while (*buf_len > 0) {
		char c1, c2;

		/* Get 2 hex digits */
		c1 = *p++;
		if (!isxdigit(c1))
			break;
		c2 = *p++;
		if (!isxdigit(c2))
			break;

		/* Store byte and advance buf */
		**buf = (hex(c1) << 4) | hex(c2);
		(*buf)++;
		(*buf_len)--;
		bytes_read++;

		/* End of chunk? */
		if (isspace(*p))
			break;
	}

	/*
	 * objdump will display raw insn as LE if code endian
	 * is LE and bytes_per_chunk > 1. In that case reverse
	 * the chunk we just read.
	 *
	 * See disassemble_bytes() in binutils/objdump.c for details
	 * on how objdump chooses the display endianness.
	 */
	if (bytes_read > 1 && !bigendian()) {
		unsigned char *chunk_end = chunk_start + bytes_read - 1;
		unsigned char tmp;

		while (chunk_start < chunk_end) {
			tmp = *chunk_start;
			*chunk_start = *chunk_end;
			*chunk_end = tmp;
			chunk_start++;
			chunk_end--;
		}
	}

	*line = p;
	return bytes_read;
}
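/*
 * Parse a single line of objdump disassembly output, extracting the raw
 * instruction bytes that follow the "address:" prefix.  Returns the
 * number of bytes written to buf.
 */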
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *p;
	size_t ret, bytes_read = 0;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return 0;
	p++;

	/* Skip initial spaces */
	while (*p) {
		if (!isspace(*p))
			break;
		p++;
	}

	do {
		ret = read_objdump_chunk(&p, &buf, &buf_len);
		bytes_read += ret;
		p++;
	} while (ret > 0);

	/* return number of successfully read bytes */
	return bytes_read;
}
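/*
 * Read objdump's stdout line by line and reassemble the raw instruction
 * bytes into 'buf' at offsets derived from each line's address.  On
 * return, *len holds the number of bytes that could not be read.
 */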
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	size_t line_len, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/*
		 * Copy it from the temporary buffer to 'buf' according
		 * to the address on the current objdump line.
		 */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* len returns number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}
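/*
 * Disassemble 'len' bytes of 'filename' starting at 'addr' by running
 * objdump and capturing its output through a pipe.
 */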
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes: %zu\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if (i % 16 == 15)
			pr_debug("\n");
	}
	pr_debug("\n");
}
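/*
 * Core of the test: read the object code at 'addr' twice - once through
 * perf's own dso__data_read_offset() and once via objdump - and verify
 * that both reads return identical bytes.
 */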
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ];
	unsigned char buf2[BUFSZ];
	size_t ret_len;
	u64 objdump_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) {
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			return 0;
		}

		pr_debug("thread__find_map failed\n");
		return -1;
	}

	pr_debug("File is: %s\n", al.map->dso->long_name);

	if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(al.map->dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		return 0;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > al.map->end)
		len = al.map->end - addr;

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		return -1;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that.  See map__rip_2objdump() for details.
	 */
	if (map__load(al.map))
		return -1;

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(al.map->dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == al.map->start) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				return 0;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			return 0;
		}
		state->done[state->done_cnt++] = al.map->start;
	}

	objdump_name = al.map->dso->long_name;
	if (dso__needs_decompress(al.map->dso)) {
		if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			return -1;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(al.map->dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
				return 0;
			} else {
				return -1;
			}
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		return -1;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		return -1;
	}
	pr_debug("Bytes read match those read by objdump\n");

	return 0;
}
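/*
 * For each PERF_RECORD_SAMPLE, resolve the sampled thread and compare
 * READLEN bytes of object code at the sample's instruction pointer.
 */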
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
	thread__put(thread);
	return ret;
}
static int process_event(struct machine *machine, struct perf_evlist *evlist,
			 union perf_event *event, struct state *state)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event, state);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}
static int process_events(struct machine *machine, struct perf_evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	struct perf_mmap *md;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_mmap__consume(md);
			if (ret < 0)
				return ret;
		}
		perf_mmap__read_done(md);
	}
	return 0;
}
static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}
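/*
 * Generate some user-space CPU activity: repeatedly sort a large
 * reverse-ordered array so that samples land in this test's own code.
 */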
static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}
static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}
static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}
static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}
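/*
 * Choose the sampling event.  On s390 the cycles counter may not be
 * authorized: check the CPU-measurement facility authorization mask in
 * the cpuid string and fall back to the software cpu-clock event when
 * the required authorization bit is missing.
 */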
static const char *do_determine_event(bool excl_kernel)
{
	const char *event = excl_kernel ? "cycles:u" : "cycles";

#ifdef __s390x__
	char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
	unsigned int family;
	int ret, cpum_cf_a;

	if (get_cpuid(cpuid, sizeof(cpuid)))
		goto out_clocks;
	ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
		     model, cpum_cf_v, &cpum_cf_a);
	if (ret != 5)		/* Not available */
		goto out_clocks;
	if (excl_kernel && (cpum_cf_a & 4))
		return event;
	if (!excl_kernel && (cpum_cf_a & 2))
		return event;

	/* Fall through: missing authorization */
out_clocks:
	event = excl_kernel ? "cpu-clock:u" : "cpu-clock";
#endif
	return event;
}
static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}
enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};
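/*
 * Run the test once: record samples for a synthetic workload, then
 * compare the object code perf saw with objdump's view of it.  When
 * 'try_kcore' is set, force kallsyms/kcore instead of vmlinux.
 */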
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 500,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();
	machine->env = &perf_env;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		str = do_determine_event(excl_kernel);
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts, NULL);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by
				 * evlist and will be freed by the following
				 * perf_evlist__set_maps() call.  Take a
				 * reference to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose > 0) {
				char errbuf[512];

				perf_evlist__strerror_open(evlist, errno, errbuf,
							   sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n",
					 errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}
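/*
 * Test entry point: run once against vmlinux, then once more against
 * kcore.  The "no vmlinux/kcore/access/kernel obj" outcomes are logged
 * but still count as success, since they reflect the test environment
 * rather than a code-reading failure.
 */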
int test__code_reading(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}