// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"
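
/*
 * Weak placeholders: these stay NULL unless the corresponding libunwind
 * backend (local, 32-bit x86 remote, arm64 remote) is built in, in which
 * case its strong definition overrides the weak symbol.
 */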
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
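
/*
 * Select the libunwind ops for this thread's maps: start from the local
 * (native) unwinder and switch to the 32-bit x86 or arm64 remote unwinder
 * when the DSO's bitness differs from the native one, then let the chosen
 * ops prepare their address space.
 */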
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
	const char *arch;
	enum dso_type dso_type;
	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
	struct dso *dso = map__dso(map);
	struct machine *machine;
	int err;

	if (!dwarf_callchain_users)
		return 0;

	if (maps__addr_space(maps)) {
		pr_debug("unwind: thread map already set, dso=%s\n", dso__name(dso));
		if (initialized)
			*initialized = true;
		return 0;
	}

	machine = maps__machine(maps);
	/* env->arch is NULL for live-mode (i.e. perf top) */
	if (!machine->env || !machine->env->arch)
		goto out_register;

	dso_type = dso__type(dso, machine);
	if (dso_type == DSO__TYPE_UNKNOWN)
		return 0;

	arch = perf_env__arch(machine->env);

	if (!strcmp(arch, "x86")) {
		if (dso_type != DSO__TYPE_64BIT)
			ops = x86_32_unwind_libunwind_ops;
	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
		if (dso_type == DSO__TYPE_64BIT)
			ops = arm64_unwind_libunwind_ops;
	}

	if (!ops) {
		pr_warning_once("unwind: target platform=%s is not supported\n", arch);
		return 0;
	}
out_register:
	maps__set_unwind_libunwind_ops(maps, ops);

	err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
	if (initialized)
		*initialized = err ? false : true;
	return err;
}
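
/* Drop cached unwind state for these maps; a no-op when no unwinder was registered. */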
void unwind__flush_access(struct maps *maps)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);

	if (ops)
		ops->flush_access(maps);
}
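
/* Release per-thread unwind resources if an unwinder was set up for these maps. */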
void unwind__finish_access(struct maps *maps)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);

	if (ops)
		ops->finish_access(maps);
}
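
/*
 * Unwind the callchain for @thread and hand each frame to @cb.  Returns 0
 * when no unwinder was registered for the thread's maps.
 */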
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack,
			bool best_effort)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread__maps(thread));

	if (ops)
		return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
	return 0;
}