/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

static bool in_init(const struct module *mod, void *loc)
{
        return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

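/*
 * Emit a PLT entry for the target of @rela and return its address. The
 * entry goes into the core or the init PLT section, depending on whether
 * the relocated instruction lives in the init area. Each entry (struct
 * plt_entry in asm/module.h) is a movn/movk/movk/br sequence that builds
 * the target address in register x16 (IP0) and branches to it, so it can
 * reach any target regardless of the range of the original branch.
 */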
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym)
{
        struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
                                                          &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
        int i = pltsec->plt_num_entries;
        u64 val = sym->st_value + rela->r_addend;

        plt[i] = get_plt_entry(val);

        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated
         * (if one exists).
         */
        if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
                return (u64)&plt[i - 1];

        pltsec->plt_num_entries++;
        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        return (u64)&plt[i];
}

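/*
 * Cortex-A53 erratum 843419: an ADRP at offset 0xff8 or 0xffc of a 4 KB
 * page may, under certain instruction sequences, produce an incorrect
 * result. The veneer below works around this by computing the ADRP's
 * result with a movn/movk/movk sequence instead, then branching back.
 * For example (illustrative value), for a target page address of
 * 0xffff000008abc000:
 *
 *      movn    rd, #0x3fff             // rd = 0xffffffffffffc000
 *      movk    rd, #0x08ab, lsl #16    // rd = 0xffffffff08abc000
 *      movk    rd, #0x0000, lsl #32    // rd = 0xffff000008abc000
 *      br      <insn after the ADRP>
 *
 * Bits [63:48] keep the all-ones value set by movn, which is correct for
 * kernel virtual addresses.
 */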
#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
{
        struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
                                                          &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
        int i = pltsec->plt_num_entries++;
        u32 mov0, mov1, mov2, br;
        int rd;

        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        /* get the destination register of the ADRP instruction */
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
                                          le32_to_cpup((__le32 *)loc));

        /* generate the veneer instructions */
        mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_INVERSE);
        mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
                                         AARCH64_INSN_BRANCH_NOLINK);

        plt[i] = (struct plt_entry){
                        cpu_to_le32(mov0),
                        cpu_to_le32(mov1),
                        cpu_to_le32(mov2),
                        cpu_to_le32(br)
                };

        return (u64)&plt[i];
}
#endif

#define cmp_3way(a,b)   ((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
        const Elf64_Rela *x = a, *y = b;
        int i;

        /* sort by type, symbol index and addend */
        i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
        if (i == 0)
                i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
        if (i == 0)
                i = cmp_3way(x->r_addend, y->r_addend);
        return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
        /*
         * Entries are sorted by type, symbol index and addend. That means
         * that, if a duplicate entry exists, it must be in the preceding
         * slot.
         */
        return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

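/*
 * For example, two R_AARCH64_CALL26 relocations against the same symbol
 * with zero addends sort into adjacent slots, so the second one is seen
 * as a duplicate and can share the PLT entry emitted for the first.
 */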
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
                               Elf64_Word dstidx, Elf_Shdr *dstsec)
{
        unsigned int ret = 0;
        Elf64_Sym *s;
        int i;

        for (i = 0; i < num; i++) {
                u64 min_align;

                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                                break;

                        /*
                         * We only have to consider branch targets that resolve
                         * to symbols that are defined in a different section.
                         * This is not simply a heuristic, it is a fundamental
                         * limitation, since there is no guaranteed way to emit
                         * PLT entries sufficiently close to the branch if the
                         * section size exceeds the range of a branch
                         * instruction. So ignore relocations against defined
                         * symbols if they live in the same section as the
                         * branch instruction.
                         */
                        s = syms + ELF64_R_SYM(rela[i].r_info);
                        if (s->st_shndx == dstidx)
                                break;

                        /*
                         * Jump relocations with non-zero addends against
                         * undefined symbols are supported by the ELF spec, but
                         * do not occur in practice (e.g., 'jump n bytes past
                         * the entry point of undefined function symbol f').
                         * So we need to support them, but there is no need to
                         * take them into consideration when trying to optimize
                         * this code. So let's only check for duplicates when
                         * the addend is zero: this allows us to record the PLT
                         * entry address in the symbol table itself, rather than
                         * having to search the list for duplicates each time we
                         * emit one.
                         */
                        if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
                                ret++;
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                case R_AARCH64_ADR_PREL_PG_HI21:
                        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
                            !cpus_have_const_cap(ARM64_WORKAROUND_843419))
                                break;

                        /*
                         * Determine the minimal safe alignment for this ADRP
                         * instruction: the section alignment at which it is
                         * guaranteed not to appear at a vulnerable offset.
                         *
                         * This comes down to finding the least significant zero
                         * bit in bits [11:3] of the section offset, and
                         * increasing the section's alignment so that the
                         * resulting address of this instruction is guaranteed
                         * to equal the offset in that particular bit (as well
                         * as all less significant bits). This ensures that the
                         * address modulo 4 KB != 0xff8 or 0xffc (which would
                         * have all ones in bits [11:3]).
                         */
                        min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
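
                        /*
                         * For example, r_offset == 0x37c8 gives ffz(0x37cf) == 4
                         * and min_align == 0x20: with the section aligned to 32
                         * bytes, bit 4 of the ADRP's address is guaranteed to be
                         * zero, so bits [11:3] can never be all ones. An offset
                         * of 0x9ffc gives min_align == 0x4000 instead, which
                         * exceeds SZ_4K, so a veneer slot is allocated below.
                         */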

                        /*
                         * Allocate veneer space for each ADRP that may appear
                         * at a vulnerable offset nonetheless. At relocation
                         * time, some of these will remain unused since some
                         * ADRP instructions can be patched to ADR instructions
                         * instead.
                         */
                        if (min_align > SZ_4K)
                                ret++;
                        else
                                dstsec->sh_addralign = max(dstsec->sh_addralign,
                                                           min_align);
                        break;
                }
        }
        return ret;
}

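/*
 * Called by the generic module loader before the module's sections are
 * laid out, so that the empty .plt placeholder sections emitted by the
 * module linker script can be resized to hold the worst-case number of
 * PLT entries counted above.
 */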
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                              char *secstrings, struct module *mod)
{
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
        Elf_Shdr *tramp = NULL;
        int i;

        /*
         * Find the empty .plt sections so we can expand them to store the
         * PLT entries. Record the symtab address as well.
         */
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
                        mod->arch.core.plt = sechdrs + i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt = sechdrs + i;
                else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                         !strcmp(secstrings + sechdrs[i].sh_name,
                                 ".text.ftrace_trampoline"))
                        tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }

        if (!mod->arch.core.plt || !mod->arch.init.plt) {
                pr_err("%s: module PLT section(s) missing\n", mod->name);
                return -ENOEXEC;
        }
        if (!syms) {
                pr_err("%s: module symtab section missing\n", mod->name);
                return -ENOEXEC;
        }

        for (i = 0; i < ehdr->e_shnum; i++) {
                Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
                int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
                Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

                if (sechdrs[i].sh_type != SHT_RELA)
                        continue;

                /* ignore relocations that operate on non-exec sections */
                if (!(dstsec->sh_flags & SHF_EXECINSTR))
                        continue;

                /* sort by type, symbol index and addend */
                sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);

                if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
                        core_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
                else
                        init_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
        }

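        /*
         * Marking the PLT sections SHT_NOBITS means there is nothing to copy
         * from the file: the module loader simply allocates sh_size bytes of
         * module memory for each SHF_ALLOC section at the requested alignment.
         */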
        mod->arch.core.plt->sh_type = SHT_NOBITS;
        mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
        mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
        mod->arch.core.plt_num_entries = 0;
        mod->arch.core.plt_max_entries = core_plts;

        mod->arch.init.plt->sh_type = SHT_NOBITS;
        mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
        mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;

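        /*
         * The ftrace trampoline section only ever holds a single PLT entry,
         * used to reach ftrace_caller when the module is loaded outside the
         * direct branch range of the core kernel.
         */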
        if (tramp) {
                tramp->sh_type = SHT_NOBITS;
                tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
                tramp->sh_addralign = __alignof__(struct plt_entry);
                tramp->sh_size = sizeof(struct plt_entry);
        }

        return 0;
}