// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/rbtree.h>
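
/*
 * Translate an address inside the kallsyms kernel map back into an absolute
 * kernel address, so it can be compared with addresses obtained through the
 * vmlinux map.
 */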
#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))

int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused,
				   int subtest __maybe_unused)
{
	struct map *kallsyms_map, *vmlinux_map, *map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct maps *maps = &vmlinux.kmaps.maps[type];
	u64 mem_start, mem_end;

	/*
	 * Init the machines that will hold the kernel and the modules obtained
	 * from both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);
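
	/*
	 * Both instances describe the same running host kernel (HOST_KERNEL_ID);
	 * they differ only in where their symbol tables will come from.
	 */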

	/*
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");

	/*
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 * Do not use kcore, as this test was designed before kcore support
	 * and has parts that only make sense if using the non-kcore code.
	 * XXX: extend it to stress the kcore code as well, hint: the list
	 * of modules extracted from /proc/kcore, in its current form, can't
	 * be compacted against the list of modules found in the "vmlinux"
	 * code and with the one obtained from /proc/modules from the "kallsyms" code.
	 */
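	/* The trailing 'true' asks for the plain /proc/kallsyms path, i.e. no kcore, matching the comment above. */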
	if (__machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, true) <= 0) {
		pr_debug("dso__load_kallsyms ");

	/*
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms);
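	/* This is the map that the UM() helper above dereferences. */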

	/*
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");

	vmlinux_map = machine__kernel_map(&vmlinux);

	/*
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol; if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type) <= 0) {
		pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");

	/*
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms DSO. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;

		sym = rb_entry(nd, struct symbol, rb_node);
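
		/* Zero-sized symbols have nothing to compare, skip them. */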
		if (sym->start == sym->end)
			continue;
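
		/*
		 * Translate the vmlinux symbol range to absolute addresses so
		 * it can be compared with what UM() yields on the kallsyms side.
		 */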
		mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);

		first_pair = machine__find_kernel_symbol(&kallsyms, type,
							 mem_start, NULL);
		pair = first_pair;

		if (pair && UM(pair->start) == mem_start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = mem_end - UM(pair->end);
				if (llabs(skew) >= page_size)
					pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
						 mem_start, sym->name, mem_end,
						 UM(pair->end));

				/*
				 * Do not count this as a failure, because we
				 * could really find a case where it's not
				 * possible to get proper function end from
				 * kallsyms.
				 */
			} else {
				pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL);
				if (pair) {
					if (UM(pair->start) == mem_start)
						goto next_pair;

					pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
						 mem_start, sym->name, pair->name);
				} else {
					pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
						 mem_start, sym->name, first_pair->name);
				}
			}
		} else
			pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
				 mem_start, sym->name);
	}
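
	/*
	 * Only the "not on kallsyms" case above is a hard failure; differing
	 * names and end addresses are reported as warnings.
	 */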
	header_printed = false;

	for (map = maps__first(maps); map; map = map__next(map)) {
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the vmlinux DSO will have the path of the vmlinux file
		 * being used, so use the short name, less descriptive but the
		 * same ("[kernel]" in both cases).
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						map->dso->kernel ?
							map->dso->short_name :
							map->dso->name);
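
		/*
		 * Maps present on both sides are flagged via ->priv; whatever
		 * is left unmatched exists only in vmlinux.
		 */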
		if (pair)
			pair->priv = 1;
		else {
			if (!header_printed) {
				pr_info("WARN: Maps only in vmlinux:\n");
				header_printed = true;
			}
			map__fprintf(map, stderr);
		}
	}
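
	/*
	 * Now compare by address: look each vmlinux map range up in kallsyms
	 * and report ranges that are present there under a different name.
	 */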
	header_printed = false;

	for (map = maps__first(maps); map; map = map__next(map)) {
		mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);

		pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
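		/* ->priv was set when this map was matched by name above, skip those. */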
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == mem_start) {
			if (!header_printed) {
				pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
				header_printed = true;
			}

			pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				map->start, map->end, map->pgoff, map->dso->name);
			if (mem_end != pair->end)
				pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	header_printed = false;

	maps = &kallsyms.kmaps.maps[type];
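
	/*
	 * Finally walk the kallsyms maps and report any that were never
	 * matched against a vmlinux map in the passes above.
	 */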
	for (map = maps__first(maps); map; map = map__next(map)) {
		if (!map->priv) {
			if (!header_printed) {
				pr_info("WARN: Maps only in kallsyms:\n");
				header_printed = true;
			}
			map__fprintf(map, stderr);
		}
	}
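
	/* Tear down both machines, releasing their maps, DSOs and symbols. */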
	machine__exit(&kallsyms);
	machine__exit(&vmlinux);