// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX	\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX	\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX	\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX	\
	(optprobe_template_end - optprobe_template_entry)
DEFINE_INSN_CACHE_OPS(ppc_optinsn);
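/*
 * DEFINE_INSN_CACHE_OPS() generates the get_ppc_optinsn_slot() and
 * free_ppc_optinsn_slot() helpers used below, backed by the
 * kprobe_ppc_optinsn_slots cache defined further down.
 */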
static bool insn_page_in_use;
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}
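/*
 * optinsn_slot is a buffer reserved in kernel text (see
 * optprobes_head.S), which keeps detour buffers within direct-branch
 * range of most kernel code. There is only the one slot, hence the
 * insn_page_in_use flag above.
 */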
static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}
struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
};
/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
	/*
	 * A kprobe placed for the kretprobe trampoline during boot
	 * sits on a 'nop' instruction, which can be emulated,
	 * so further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
	/*
	 * We only support optimizing kernel addresses, but not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;
	/*
	 * Kprobes placed on conditional branch instructions are
	 * not optimized, as we can't predict the nip beforehand with
	 * a dummy pt_regs and cannot ensure that the return branch
	 * from the detour buffer falls in the branch range (i.e. 32MB).
	 * A branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
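	/*
	 * Illustrative example, not from the original source: for a probe
	 * on 'beq <target>', the post-emulation nip depends on the live
	 * CR0 value, which the zeroed dummy pt_regs above cannot supply,
	 * so no single branch back from the detour buffer can be patched.
	 */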
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}
/*
 * emulate_step() requires the insn to be emulated as its
 * second parameter. Load register 'r4' with the
 * instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}
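/*
 * Illustrative example, not from the original source: for
 * val = 0x60000000 (the 'nop' opcode), the two patched instructions
 * are:
 *
 *	addis	r4,0,0x6000	; r4 = 0x6000_0000
 *	ori	r4,r4,0x0000	; OR in the low halfword
 */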
/*
 * Generate instructions to load provided immediate 64-bit value
 * to register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}
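/*
 * Illustrative example, not from the original source: assuming
 * val = 0x123456789abcdef0, the five patched instructions build the
 * value 16 bits at a time:
 *
 *	lis	r3,0x1234	; r3 = 0x0000_0000_1234_0000
 *	ori	r3,r3,0x5678	; r3 = 0x0000_0000_1234_5678
 *	rldicr	r3,r3,32,31	; r3 = 0x1234_5678_0000_0000
 *	oris	r3,r3,0x9abc	; r3 = 0x1234_5678_9abc_0000
 *	ori	r3,r3,0xdef0	; r3 = 0x1234_5678_9abc_def0
 */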
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;
	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use
	 * of a branch instruction in powerpc, because the address is specified
	 * in an immediate field in the instruction opcode itself, i.e. 24 bits
	 * in the opcode specify the address. Therefore the address should
	 * be within 32MB on either side of the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;
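	/*
	 * Worked out for clarity, not from the original source: the
	 * 24-bit LI field holds a word offset, concatenated with 0b00
	 * to form a 26-bit signed byte offset, giving a reach of
	 * +/- 2^25 bytes = 32MB.
	 */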
	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}
	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);
	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}
	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
					   (unsigned long)op_callback_addr,
					   BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
					    (unsigned long)emulate_step_addr,
					    BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}
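/*
 * Rough sketch of the resulting detour buffer, derived from the fixup
 * steps above (the exact template lives in optprobes_head.S):
 *
 *	[save register state]
 *	TMPL_OP_IDX:		load &op into r3 (5 instructions)
 *	TMPL_CALL_HDLR_IDX:	bl optimized_callback
 *	TMPL_INSN_IDX:		load probed insn into r4 (2 instructions)
 *	TMPL_EMULATE_IDX:	bl emulate_step
 *	[restore register state]
 *	TMPL_RET_IDX:		b <nip>
 */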
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}
/*
 * On powerpc, an optprobe always replaces one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in this address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}