arch/powerpc/include/asm/code-patching.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
unsigned int create_branch(const unsigned int *addr,
			   unsigned long target, int flags);
unsigned int create_cond_branch(const unsigned int *addr,
				unsigned long target, int flags);
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);
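/*
 * Illustrative use of the patching API above; a sketch only. The @addr
 * and @target values are hypothetical and must refer to kernel text:
 *
 *	unsigned int *addr = ...;	// instruction to replace
 *	unsigned long target = ...;	// branch destination
 *	unsigned int instr;
 *	int err;
 *
 *	// Patch a relative "bl target" over *addr:
 *	err = patch_branch(addr, target, BRANCH_SET_LINK);
 *
 *	// Or build the instruction first, then write it out.
 *	// create_branch() returns 0 if the target is out of range:
 *	instr = create_branch(addr, target, 0);
 *	if (instr)
 *		err = patch_instruction(addr, instr);
 */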
static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, unsigned int instr)
{
	return patch_instruction((unsigned int *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, (*addr & ~clr) | set);
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
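/*
 * A patch site stores a 32-bit offset relative to itself, so
 * patch_site_addr() recovers the absolute address of the instruction by
 * adding that offset to the site's own address. Sketch only; the site
 * name below is hypothetical:
 *
 *	extern s32 patch__example_site;
 *
 *	patch_instruction_site(&patch__example_site, PPC_INST_NOP);
 */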
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
			      const unsigned int *src);
extern bool is_conditional_branch(unsigned int instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
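/*
 * Sketch of intended use; the vector number and handler label below are
 * hypothetical:
 *
 *	patch_exception(0x1d0, my_debug_handler);
 *
 * which repoints the Book3E exception at that vector to my_debug_handler.
 */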
#endif

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		0x3c020000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL
static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 *	addis r2,r12,XXXX
	 *	addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 *	lis   r2,XXXX
	 *	addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((func_descr_t *)func)->entry;
#else
	return (unsigned long)func;
#endif
}

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* On PPC64 ABIv2 the global entry point is at the function's address */
	return (unsigned long)func;
#else
	/* In all other cases there is no change vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}
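/*
 * Sketch of the difference between the two helpers; the function name is
 * illustrative:
 *
 *	unsigned long gep = ppc_global_function_entry((void *)my_func);
 *	unsigned long lep = ppc_function_entry((void *)my_func);
 *
 * On ABIv2, when my_func begins with the addis/addi TOC setup shown
 * above, lep == gep + 8; otherwise lep == gep. On ABIv1 both resolve
 * through the function descriptor to the start of the function text.
 */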
/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we lookup the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}
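/*
 * Sketch of intended use (the symbol name is only an example):
 *
 *	unsigned long entry = ppc_kallsyms_lookup_name("schedule");
 *
 *	if (!entry)
 *		// symbol not found (or the name was too long)
 *
 * On ABIv1 this looks up ".schedule"; on ABIv2 it returns the local
 * entry point, which is the address suitable for patching calls to.
 */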
#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET		24
#else
#define R2_STACK_OFFSET		40
#endif

#define PPC_INST_LD_TOC		(PPC_INST_LD  | ___PPC_RT(__REG_R2) | \
				 ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		(PPC_INST_STD | ___PPC_RS(__REG_R0) | \
				 ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
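/*
 * These encodings let patching code recognise the instructions around a
 * call site, e.g. the "ld r2,R2_STACK_OFFSET(r1)" TOC restore that
 * follows a branch to _mcount. Sketch only; @ip is hypothetical:
 *
 *	unsigned int op;
 *
 *	if (probe_kernel_read(&op, (void *)(ip + 4), sizeof(op)))
 *		return -EFAULT;
 *	if (op != PPC_INST_LD_TOC)
 *		return -EINVAL;		// unexpected instruction
 */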
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */