#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <string.h>
#include "map.h"
#include "symbol.h"
#include "util.h"
#include "tests.h"
#include "debug.h"
#include "machine.h"

static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
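/*
 * UM(x) translates a kallsyms map-relative address x back to the absolute
 * kernel address via the kallsyms map's unmap_ip method, so values coming
 * from the kallsyms and vmlinux machines can be compared in one address
 * space.
 */
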
int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
	u64 mem_start, mem_end;

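	/*
	 * mem_start/mem_end always hold addresses already translated back to
	 * the kernel's own address space, so symbols and maps coming from the
	 * two machines can be compared directly.
	 */
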
	/*
	 * Init the machines that will hold the kernel and the modules,
	 * obtained from both vmlinux + .ko files and from /proc/kallsyms
	 * split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	/*
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

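	/* Find the reference symbol ("_stext") by name in that kernel map. */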
	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = UM(sym->start);
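	/*
	 * This is where the running kernel has _stext; it will be compared
	 * with the address found in the vmlinux image to detect relocation.
	 */
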
	/*
	 * Now repeat the kernel maps creation, this time for the vmlinux
	 * file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol; if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
		err = 0;
		goto out;
	}

	err = 0;
	/*
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;
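		/*
		 * When the names disagree we scan the kallsyms rbtree first
		 * towards lower addresses and then towards higher ones,
		 * looking for an alias that starts at the same address.
		 */
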
		sym = rb_entry(nd, struct symbol, rb_node);

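		/* Zero-sized symbols carry no code, skip them. */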
		if (sym->start == sym->end)
			continue;

		mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);

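		/*
		 * Look in kallsyms for a symbol at exactly this absolute
		 * address.
		 */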
		first_pair = machine__find_kernel_symbol(&kallsyms, type,
							 mem_start, NULL, NULL);
		pair = first_pair;

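		/*
		 * Keep the first candidate around: if the names disagree we
		 * will walk neighbouring kallsyms entries looking for an
		 * alias with the same start address, and may need to restart
		 * that walk from here in the other direction.
		 */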
		if (pair && UM(pair->start) == mem_start) {
next_pair:
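			/*
			 * kallsyms and vmlinux agree on the start address;
			 * now check that the names match as well.
			 */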
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set it to the next symbol's start - 1. In
				 * some cases this is up to a page off:
				 * trace_kmalloc, when I was developing this
				 * code, was one such example, 2106 bytes off
				 * the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = mem_end - UM(pair->end);
				if (llabs(skew) >= page_size)
					pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
						 mem_start, sym->name, mem_end,
						 UM(pair->end));

				/*
				 * Do not count this as a failure, because we
				 * could really find a case where it's not
				 * possible to get proper function end from
				 * kallsyms.
				 */
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (UM(next->start) == mem_start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

182 pr_debug("%#" PRIx64
": diff name v: %s k: %s\n",
183 mem_start
, sym
->name
, pair
->name
);
186 pr_debug("%#" PRIx64
": %s not on kallsyms\n",
187 mem_start
, sym
->name
);
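	/*
	 * The passes below dump diagnostics about the map tables themselves:
	 * maps present only in vmlinux, maps whose kallsyms counterpart has a
	 * different name, and maps present only in kallsyms.
	 */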
195 pr_info("Maps only in vmlinux:\n");
	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the vmlinux map will have the path of the vmlinux file
		 * being used, so use the short name, less descriptive but the
		 * same ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

215 pr_info("Maps in vmlinux with a different name in kallsyms:\n");
	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		mem_start = vmlinux_map->unmap_ip(vmlinux_map, pos->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, pos->end);

		pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
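		/*
		 * Maps already matched by name in the previous loop have
		 * ->priv set and are skipped here.
		 */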
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == mem_start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (mem_end != pair->end)
232 pr_info(":\n*%" PRIx64
"-%" PRIx64
" %" PRIx64
,
233 pair
->start
, pair
->end
, pair
->pgoff
);
234 pr_info(" %s\n", pair
->dso
->name
);
239 pr_info("Maps only in kallsyms:\n");
	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

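		/*
		 * Anything not marked as matched in the loops above exists
		 * only in kallsyms.
		 */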
		if (!pos->priv)
			map__fprintf(pos, stderr);
	}

out:
	machine__exit(&kallsyms);
	machine__exit(&vmlinux);
	return err;
}