/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>
struct mod_plt_sec {
	int			plt_shndx;
	int			plt_num_entries;
	int			plt_max_entries;
};

struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampolines;
};
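/*
 * Create a PLT entry for the branch relocation @rela at @loc, targeting
 * @sym, and return its address. PLT entries let a relocated B/BL reach
 * targets outside its +/-128 MB direct branch range.
 */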
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);
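/*
 * Create a veneer that materialises the page address @val in the
 * destination register of the ADRP instruction at @loc and branches back
 * past it; used when @loc is a forbidden offset for ADRP under erratum
 * 843419 (see is_forbidden_offset_for_adrp() below).
 */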
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val);
struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	adrp;	/* adrp	x16, ....		*/
	__le32	add;	/* add	x16, x16, #0x....	*/
	__le32	br;	/* br	x16			*/
};
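/*
 * Cortex-A53 erratum 843419: an ADRP in one of the last two instruction
 * slots of a 4 KB page (offset 0xff8 or 0xffc) may produce an incorrect
 * result under certain conditions, so ADRPs must not be placed there.
 */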
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return cpus_have_final_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}
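/* Encode the adrp/add/br instruction trio that branches from @pc to @dst. */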
struct plt_entry get_plt_entry(u64 dst, void *pc);
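/*
 * Scan the ELF section header table and return the first section whose
 * name, looked up in the section-name string table (e_shstrndx), matches
 * @name; NULL if no section matches.
 */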
static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
					   const Elf_Shdr *sechdrs,
					   const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}
#endif /* __ASM_MODULE_H */