/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)
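
/*
 * Note: the TMPL_*_IDX values above are word offsets of the patch sites
 * within the detour buffer template (the optprobe_template_* labels are
 * laid out in assembly, in optprobes_head.S).
 */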

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
};
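
/*
 * Slots for detour buffers are carved out of the single page handed out by
 * __ppc_alloc_insn_page() above; insn_size is filled in from
 * MAX_OPTINSN_SIZE in arch_prepare_optimized_kprobe() before the first slot
 * is requested.
 */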

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * A kprobe placed on the kretprobe trampoline at boot time sits on
	 * a 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, but not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.msr = MSR_KERNEL;
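
	/*
	 * The dummy pt_regs initialised above is only used to let
	 * analyse_instr()/emulate_update_regs() below compute the
	 * post-emulation nip; no live register state is involved.
	 */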

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs and can not ensure that the return branch from
	 * the detour buffer falls within the branch range (i.e. 32MB).
	 * A branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
			analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	hard_irq_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * No need for an explicit __hard_irq_enable() here.
	 * local_irq_restore() will re-enable interrupts,
	 * if they were hard disabled.
	 */
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() requires the instruction to be emulated as its second
 * parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
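	/*
	 * Worked example (illustrative only): for val == 0xc000000001234567
	 * the sequence patched below is
	 *	lis    r3,0xc000
	 *	ori    r3,r3,0x0000
	 *	rldicr r3,r3,32,31
	 *	oris   r3,r3,0x0123
	 *	ori    r3,r3,0x4567
	 * leaving the full 64-bit value in r3.
	 */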
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use
	 * of a branch instruction in powerpc, because the address is
	 * specified in an immediate field in the instruction opcode
	 * itself, i.e. 24 bits in the opcode specify the address.
	 * Therefore the address should be within 32MB on either side of
	 * the current instruction.
	 */
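	/* 24-bit LI field, word aligned: +/- 2^25 bytes, i.e. 32MB. */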
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

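	/*
	 * ppc_kallsyms_lookup_name() is assumed to hand back a directly
	 * branchable entry point (resolving function descriptors where the
	 * ABI uses them), so the addresses can be used as branch targets.
	 */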
	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant register
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in this address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

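/*
 * Unoptimizing only needs to re-arm the regular kprobe: writing the trap
 * back at kp.addr removes the branch to the detour buffer, since exactly
 * one instruction was overwritten.
 */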
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}