// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/moduleloader.h>

#include <asm/cache.h>
#include <asm/opcodes.h>
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR	__opcode_to_mem_thumb32(0xf8dff000 | \
						(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR	__opcode_to_mem_arm(0xe59ff000 | \
						(PLT_ENT_STRIDE - 8))
#endif
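
/*
 * Explanatory note (added, not from the original source): each PLT veneer is
 * a single "ldr pc, [pc, #offset]" instruction. Assuming the ldr[]/lit[]
 * layout of struct plt_entries from asm/module.h, the offset of
 * PLT_ENT_STRIDE - 8 (ARM) or PLT_ENT_STRIDE - 4 (Thumb2) compensates for the
 * PC read bias, so the load fetches the word PLT_ENT_STRIDE bytes past the
 * instruction, i.e. the literal slot with the same index as the veneer.
 */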
static const u32 fixed_plts[] = {
#ifdef CONFIG_DYNAMIC_FTRACE
	FTRACE_ADDR,
	MCOUNT_ADDR,
#endif
};
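
/*
 * Note (added for clarity): these fixed entries exist because dynamic ftrace
 * patches every module's mcount call sites to branch to the ftrace
 * trampolines, which may be out of B/BL range of the module; reserving a
 * veneer for FTRACE_ADDR and MCOUNT_ADDR up front guarantees one is always
 * available. Without CONFIG_DYNAMIC_FTRACE the array is empty and
 * prealloc_fixed() below is a no-op.
 */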
static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
	int i;

	if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
		return;
	pltsec->plt_count = ARRAY_SIZE(fixed_plts);

	for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
		plt->ldr[i] = PLT_ENT_LDR;

	BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
	memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
}
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ?
					&mod->arch.core : &mod->arch.init;
	struct plt_entries *plt;
	int idx;

	/* cache the address, ELF header is available only during module load */
	if (!pltsec->plt_ent)
		pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
	plt = pltsec->plt_ent;

	prealloc_fixed(pltsec, plt);

	for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

	idx = 0;
	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	pltsec->plt_count++;
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}
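
/*
 * Usage sketch (paraphrased, not verbatim from arch/arm/kernel/module.c):
 * when the relocation code finds that a B/BL target is out of the +/-32 MiB
 * branch range, it redirects the branch through the veneer returned above,
 * roughly:
 *
 *	if (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000)
 *		offset = get_module_plt(module, loc, offset + loc + 8)
 *			 - loc - 8;
 *
 * i.e. 'val' is the absolute branch target and the return value is the
 * address of the ldr veneer that the branch is rewritten to reach instead.
 */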
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}
static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
	switch (ELF32_R_TYPE(rel->r_info)) {
		u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}
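
/*
 * Worked example of the checks above (added for clarity): an ARM B/BL with a
 * zero addend stores (0 - 8) >> 2 = -2 in its 24-bit immediate, i.e.
 * 0xfffffe, which is exactly what the ARM case tests for. The Thumb2 masks
 * accept the corresponding encoding of -4 (the Thumb PC bias) spread across
 * the S/J1/J2/imm10/imm11 fields of a 32-bit B.W/BL instruction.
 */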
static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}
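
/*
 * Example (added for clarity): two R_ARM_CALL relocations in the same section
 * against the same undefined symbol, both with a zero addend, end up adjacent
 * after the sort in module_frob_arch_sections(); the second one is reported
 * as a duplicate here, so count_plts() below reserves only a single veneer
 * for that symbol.
 */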
/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * branch target.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero. (Note that calls into the core
			 * module via init PLT entries could involve section
			 * relative symbol references with non-zero addends, for
			 * which we may end up emitting duplicates, but the init
			 * PLT is released along with the rest of the .init
			 * region as soon as module loading completes.)
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}
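
/*
 * Note (added for clarity): the value returned above is only an upper bound
 * on the number of veneers the section may need; non-zero-addend or
 * cross-section duplicates can still be counted more than once. That is fine,
 * since it is only used to size the .plt/.init.plt reservations in
 * module_frob_arch_sections() below, while get_module_plt() allocates the
 * entries lazily at relocation time.
 */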
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = ARRAY_SIZE(fixed_plts);
	unsigned long init_plts = ARRAY_SIZE(fixed_plts);
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * To store the PLTs, we expand the .text section for core module code
	 * and for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;
	mod->arch.core.plt_ent = NULL;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;
	mod->arch.init.plt_ent = NULL;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}
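
/*
 * Note (added for clarity): the empty .plt/.init.plt input sections are
 * expected to come from the module linker script (asm/module.lds.h in recent
 * kernels). Marking them SHT_NOBITS with SHF_ALLOC above means the generic
 * module loader simply reserves sh_size bytes of space for them in the module
 * image; get_module_plt() then populates that space during relocation.
 */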
bool in_module_plt(unsigned long loc)
{
	struct module *mod;
	bool ret;

	preempt_disable();
	mod = __module_text_address(loc);
	ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
		      loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
	preempt_enable();

	return ret;
}
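
/*
 * Note (added for clarity): in_module_plt() lets callers (the ftrace code, in
 * the kernels this was written for) check whether 'loc' lands inside one of
 * the PLT veneers set up above, by testing it against the core and init PLT
 * ranges of the owning module. The unsigned subtraction deliberately wraps
 * for addresses below plt_ent, so a single comparison covers both bounds, and
 * preempt_disable() keeps the module alive while its arch data is examined.
 */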