/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX	\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX	\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX	\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX	\
	(optprobe_template_end - optprobe_template_entry)

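/*
 * The optprobe_template_* symbols bound the detour-buffer template that is
 * provided in assembly (optprobes_head.S). Each TMPL_*_IDX above is an
 * offset into that template, in kprobe_opcode_t words, marking a slot that
 * arch_prepare_optimized_kprobe() patches below.
 */
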
DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

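/*
 * A single detour-buffer area (optinsn_slot, reserved alongside the assembly
 * template) backs the optprobe instruction cache; insn_page_in_use tracks
 * whether it has already been handed out.
 */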
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline during boot sits on
	 * a 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, but not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we cannot predict the nip ahead of time with a
	 * dummy pt_regs, and so cannot ensure that the return branch from
	 * the detour buffer falls within branch range (i.e. 32MB). A branch
	 * back from the trampoline is set up in the detour buffer to the
	 * nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
			analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

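/*
 * Called from the detour buffer: the template loads the optimized_kprobe
 * pointer for this probe (patched in at TMPL_OP_IDX below) and hands over
 * the register state it saved, so the usual kprobe pre-handlers can run.
 */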
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() requires the instruction to be emulated as its second
 * parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}

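/*
 * For example, for val == 0x4e800020 ('blr'), the two patched instructions
 * are "addis r4,0,0x4e80" followed by "ori r4,r4,0x0020".
 */
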
/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}

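/*
 * For example, loading 0xc000000012345678 into r3 expands to:
 *	lis	r3,0xc000
 *	ori	r3,r3,0x0000
 *	rldicr	r3,r3,32,31
 *	oris	r3,r3,0x1234
 *	ori	r3,r3,0x5678
 */
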
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively close by, to permit use
	 * of a branch instruction on powerpc: the offset is encoded in a
	 * 24-bit immediate field in the opcode itself, so the target must
	 * be within 32MB on either side of the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

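	/*
	 * buff now holds a verbatim copy of the template. The slots at
	 * TMPL_OP_IDX, TMPL_CALL_HDLR_IDX, TMPL_INSN_IDX, TMPL_EMULATE_IDX
	 * and TMPL_RET_IDX are filled in below.
	 */
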
	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the optimized_kprobe for the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

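	/*
	 * Both branches are created with BRANCH_SET_LINK: they are function
	 * calls, so the template must regain control when each one returns.
	 * Install them into the template's two call slots.
	 */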
	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant register
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

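	/*
	 * Only once the detour buffer is coherent for instruction fetch is
	 * it published via op->optinsn.insn, so the probe site can later be
	 * patched to branch into it.
	 */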
	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace a single instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction that will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

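/*
 * The probed instruction is a single 4-byte word (RELATIVEJUMP_SIZE), so
 * this reduces to checking whether addr lies within that one-word window
 * at kp.addr.
 */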
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}