/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>
/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2
/*
 * A powerpc branch instruction is:
 *
 *    0         6                 30   31
 *    +---------+----------------+---+---+
 *    | opcode  |     LI         |AA |LK |
 *    +---------+----------------+---+---+
 *    where AA = 0 and LK = 0
 *
 * LI is a signed 24-bit integer. The real branch offset is computed
 * by: imm32 = SignExtend(LI:'0b00', 32);
 *
 * So the maximum forward branch is:
 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
 * and the maximum backward branch is:
 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
 */
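/*
 * Worked example (illustrative, not from the original source): a relative
 * branch 8 bytes forward is encoded as 0x48000000 | 8 = 0x48000008,
 * i.e. "b .+8". Offsets must be word aligned, so an offset such as 6
 * fails the is_offset_in_branch_range() check below.
 */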
static inline bool is_offset_in_branch_range(long offset)
{
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}
static inline bool is_offset_in_cond_branch_range(long offset)
{
	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}
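/*
 * Note (added for clarity): conditional (B-form) branches encode a 14-bit BD
 * field, giving a 16-bit byte displacement once the implied low '0b00' bits
 * are appended, so their reach is only +/-32 KB compared with the +/-32 MB
 * of the unconditional I-form branch above.
 */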
static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
				unsigned long target, int flags)
{
	long offset;

	*instr = ppc_inst(0);
	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));

	return 0;
}
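/*
 * Minimal usage sketch (illustrative only, not part of the original header):
 *
 *	ppc_inst_t insn;
 *
 *	if (!create_branch(&insn, site, (unsigned long)target, BRANCH_SET_LINK))
 *		patch_instruction(site, insn);
 *
 * builds a "bl target" at 'site' and writes it out, assuming 'site' and
 * 'target' (hypothetical names) are within +/-32 MB of each other;
 * patch_branch() below wraps the same two steps.
 */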
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
/*
 * The data patching functions patch_uint() and patch_ulong(), etc., must be
 * called on aligned addresses.
 *
 * The instruction patching functions patch_instruction() and similar must be
 * called on addresses satisfying instruction alignment requirements.
 */
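/*
 * Example (added for illustration): patch_uint() on a 4-byte aligned address
 * succeeds, while patch_uint(aligned_addr + 2, val) is rejected with -EINVAL
 * by the inline variant below; the same rule applies to patch_ulong() with
 * sizeof(unsigned long) alignment.
 */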
#ifdef CONFIG_PPC64

int patch_uint(void *addr, unsigned int val);
int patch_ulong(void *addr, unsigned long val);

#define patch_u64 patch_ulong

#else
static inline int patch_uint(void *addr, unsigned int val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
		return -EINVAL;

	return patch_instruction(addr, ppc_inst(val));
}
static inline int patch_ulong(void *addr, unsigned long val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
		return -EINVAL;

	return patch_instruction(addr, ppc_inst(val));
}

#endif /* CONFIG_PPC64 */

#define patch_u32 patch_uint
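/*
 * Comment added for clarity: a patch site is an s32 that stores the offset
 * from its own address to the instruction to be patched. patch_site_addr()
 * converts that self-relative offset back into an absolute address, and the
 * *_site() helpers below patch through it.
 */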
static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}
static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
	return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
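/*
 * Read-modify-write helpers (comment added for clarity): clear the bits in
 * 'clr', set the bits in 'set', and re-patch the resulting instruction word.
 */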
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
static inline unsigned int branch_opcode(ppc_inst_t instr)
{
	return ppc_inst_primary_opcode(instr) & 0x3F;
}
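/*
 * Comment added for clarity: primary opcode 18 is the I-form unconditional
 * branch family (b/ba/bl/bla), opcode 16 the B-form conditional branch
 * family (bc/bca/bcl/bcla).
 */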
static inline int instr_is_branch_iform(ppc_inst_t instr)
{
	return branch_opcode(instr) == 18;
}
static inline int instr_is_branch_bform(ppc_inst_t instr)
{
	return branch_opcode(instr) == 16;
}
int instr_is_relative_branch(ppc_inst_t instr);
int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
bool is_conditional_branch(ppc_inst_t instr);
#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		(PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12	(PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2	(PPC_RAW_ADDI(_R2, _R2, 0))
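/*
 * Comment added for clarity: OP_RT_RA_MASK keeps only the primary opcode and
 * the RT/RA register fields, so the comparisons in ppc_function_entry() below
 * match the TOC-setup instructions regardless of their 16-bit immediate.
 */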
static inline unsigned long ppc_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 *	addis r2,r12,XXXX
	 *	addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 *	lis   r2,XXXX
	 *	addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((struct func_desc *)func)->addr;
#else
	return (unsigned long)func;
#endif
}
static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
	/* On PPC64 ABIv2 the global entry point is at the address itself */
	return (unsigned long)func;
#else
	/* In all other cases there is no change vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}
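/*
 * Illustrative example (not from the original source): for an ABIv2 function
 * whose prologue starts with the addis/addi TOC setup, the global entry point
 * is the function address itself and the local entry point is 8 bytes in, so
 * ppc_global_function_entry(f) == (unsigned long)f and
 * ppc_function_entry(f) == (unsigned long)f + 8.
 */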
/*
 * Wrapper around kallsyms_lookup() to return function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(CONFIG_PPC64_ELF_ABI_V2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}
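/*
 * Usage note (added for illustration): ppc_kallsyms_lookup_name("foo"), with
 * "foo" a hypothetical symbol name, resolves ".foo" on ABIv1 (the text entry
 * behind the function descriptor) and the local entry point of foo on ABIv2,
 * so callers can patch at the returned address directly.
 */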
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef CONFIG_PPC64_ELF_ABI_V2
#define R2_STACK_OFFSET		24
#else
#define R2_STACK_OFFSET		40
#endif

#define PPC_INST_LD_TOC		PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
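/*
 * Comment added for clarity: PPC_INST_LD_TOC is the "ld r2,R2_STACK_OFFSET(r1)"
 * that reloads the TOC pointer from the stack after a call, and
 * PPC_INST_STD_LR is the "std r0,PPC_LR_STKOFF(r1)" that saves the link
 * register (copied to r0 by a preceding mflr r0).
 */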
#endif /* _ASM_POWERPC_CODE_PATCHING_H */