/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>		/* struct insn, kernel_insn_init() */
#include <asm/debugreg.h>

#include "common.h"		/* SAVE_REGS_STRING/RESTORE_REGS_STRING */
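
/*
 * Recover the original instruction bytes at @addr when @addr falls inside
 * the region displaced by a jump-optimized kprobe.  The bytes overwritten
 * by the relative jump are taken from op->optinsn.copied_insn; everything
 * else is read from memory as-is.  Returns the address of @buf holding the
 * recovered bytes, or @addr itself if no optimized kprobe covers it.
 */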
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimizing */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes may have been
	 * overwritten by the jump destination address.  In this case, the
	 * original bytes must be recovered from the op->optinsn.copied_insn
	 * buffer.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (addr == (unsigned long)kp->addr) {
		/* The first byte of the probed instruction is the int3 */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}
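
/*
 * The detour template below saves the interrupted register state, loads a
 * pointer to the optimized_kprobe into the first-argument register (patched
 * in at optprobe_template_val by synthesize_set_arg1()), and calls
 * optimized_callback() (patched in at optprobe_template_call) before
 * restoring the registers and falling through to the copied instructions.
 */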
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;		/* REX.W prefix */
	*addr++ = 0xbf;		/* movq $..., %rdi */
#else
	*addr++ = 0xb8;		/* movl $..., %eax */
#endif
	*(unsigned long *)addr = val;
}
92 ".global optprobe_template_entry\n"
93 "optprobe_template_entry:\n"
95 /* We don't bother saving the ss register */
100 ".global optprobe_template_val\n"
101 "optprobe_template_val:\n"
104 ".global optprobe_template_call\n"
105 "optprobe_template_call:\n"
107 /* Move flags to rsp */
108 " movq 144(%rsp), %rdx\n"
109 " movq %rdx, 152(%rsp)\n"
111 /* Skip flags entry */
114 #else /* CONFIG_X86_32 */
118 ".global optprobe_template_val\n"
119 "optprobe_template_val:\n"
121 ".global optprobe_template_call\n"
122 "optprobe_template_call:\n"
125 " addl $4, %esp\n" /* skip cs */
128 ".global optprobe_template_end\n"
129 "optprobe_template_end:\n");
#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)
/* Optimized kprobe callback function: called from optinsn */
static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
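
/*
 * Copy whole instructions from @src into @dest until at least
 * RELATIVEJUMP_SIZE bytes are covered, refusing instructions that cannot be
 * safely executed out of line (not boostable) and ranges reserved by
 * ftrace, alternatives or jump labels.  Returns the copied length or a
 * negative error code.
 */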
static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}
/* Check whether insn is an indirect jump */
static int __kprobes insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
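
/*
 * Note: a branch from elsewhere in the function into the displacement bytes
 * that the relative jump will occupy (the RELATIVE_ADDR_SIZE bytes following
 * paddr + INT3_SIZE) would land in the middle of the new jump instruction,
 * so any such branch makes the probe point unsafe to optimize.
 */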
/* Decode the whole function to ensure no instruction jumps into the target */
static int __kprobes can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr <  (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
		insn_get_length(&insn);
		/* Another subsystem puts a breakpoint */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check no instruction jumps into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}
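
/*
 * The instructions displaced by the jump (op->optinsn.size bytes) are either
 * overwritten or executed out of line, so another enabled kprobe inside that
 * range would never trigger; refuse the optimization in that case.
 */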
/* Check whether the optimized_kprobe can actually be optimized. */
int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}
/* Check whether the addr is within the optimized instructions. */
int __kprobes
arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}
/* Free the optimized instruction slot */
static __kprobes
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}
void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
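
/*
 * Layout of the out-of-line detour buffer built below:
 *
 *   [ template (TMPL_END_IDX) ][ copied insns (optinsn.size) ][ jmp back ]
 *
 * The template calls optimized_callback(), the copied instructions are the
 * ones displaced from the probe point, and the trailing jump returns to the
 * original code right after them.
 */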
/*
 * Copy the replacing target instructions.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify that the address gap is within the 2GB range, because the
	 * detour uses a relative jump.
	 */
	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff)
		return -ERANGE;

	buf = (u8 *)op->optinsn.insn;

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy the arch-dependent instance from the template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set the probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set the returning jmp instruction at the tail of the out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}
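
/*
 * text_poke_bp() rewrites live kernel text by first installing an int3,
 * then writing the remaining bytes, and finally replacing the int3, so the
 * 5-byte jump can be installed while other CPUs may be executing this code;
 * a CPU that hits the transient int3 is diverted to op->optinsn.insn.
 */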
/*
 * Replace breakpoints (int3) with relative jumps.
 * The caller must hold kprobe_mutex and text_mutex.
 */
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Back up the instructions which will be replaced by the jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}
/* Replace a relative jump with a breakpoint (int3). */
void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 to the first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}
/*
 * Recover original instructions and breakpoints from relative jumps.
 * The caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
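
/*
 * Called from the int3 kprobe handler: if the hit probe has already been
 * jump-optimized, skip the usual single-stepping and resume execution in
 * the detour buffer, right after the register-saving template.
 */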
int __kprobes
setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}