// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
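
/*
 * Write a single instruction at @addr. aarch64_insn_patch_text() takes
 * care of serialising against concurrently executing CPUs and of the
 * required cache maintenance.
 */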
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
        void *addrs[1];
        u32 insns[1];

        addrs[0] = addr;
        insns[0] = opcode;

        return aarch64_insn_patch_text(addrs, insns, 1);
}
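
/*
 * Copy the probed instruction into its out-of-line (xol) slot and record
 * where execution must resume afterwards: the instruction following the
 * probed one.
 */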
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
        /* prepare insn slot */
        patch_text(p->ainsn.api.insn, p->opcode);

        flush_icache_range((uintptr_t) (p->ainsn.api.insn),
                           (uintptr_t) (p->ainsn.api.insn) +
                           MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

        /*
         * Needs restoring of return address after stepping xol.
         */
        p->ainsn.api.restore = (unsigned long) p->addr +
                sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
        /* This instruction is not executed out of line (xol). No need to adjust the PC */
        p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (p->ainsn.api.handler)
                p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

        /* single step simulated, now go for post processing */
        post_kprobe_handler(kcb, regs);
}
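
/*
 * Validate and decode the instruction at p->addr: reject unprobeable
 * locations (unaligned addresses, extable entries), then decide whether
 * the instruction can be single-stepped from a slot or must be simulated.
 */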
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        unsigned long probe_addr = (unsigned long)p->addr;

        if (probe_addr & 0x3)
                return -EINVAL;

        /* copy instruction */
        p->opcode = le32_to_cpu(*p->addr);

        if (search_exception_tables(probe_addr))
                return -EINVAL;

        /* decode instruction */
        switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
        case INSN_REJECTED:     /* insn not supported */
                return -EINVAL;

        case INSN_GOOD_NO_SLOT: /* insn needs simulation */
                p->ainsn.api.insn = NULL;
                break;

        case INSN_GOOD:         /* instruction uses slot */
                p->ainsn.api.insn = get_insn_slot();
                if (!p->ainsn.api.insn)
                        return -ENOMEM;
                break;
        }

        /* prepare the instruction */
        if (p->ainsn.api.insn)
                arch_prepare_ss_slot(p);
        else
                arch_prepare_simulate(p);

        return 0;
}
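
/*
 * Override the generic (weak) slot-page allocator so that xol slots live
 * in executable, read-only memory; patch_text() writes to them via the
 * regular text-patching path instead.
 */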
void *alloc_insn_page(void)
{
        void *page;

        page = vmalloc_exec(PAGE_SIZE);
        if (page)
                set_memory_ro((unsigned long)page, 1);

        return page;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.api.insn) {
                free_insn_slot(p->ainsn.api.insn, 0);
                p->ainsn.api.insn = NULL;
        }
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

/*
 * When PSTATE.D is set (masked), then software step exceptions can not be
 * generated.
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken. PSTATE.D is set while entering into any exception
 * mode, however software clears it for any normal (non-debug-exception)
 * mode in the exception entry. Therefore, when we are entering into kprobe
 * breakpoint handler from any normal mode then SPSR.D bit is already
 * cleared, however it is set when we are entering from any debug exception
 * mode.
 * Since we always need to generate single step exception after a kprobe
 * breakpoint exception therefore we need to clear it unconditionally, when
 * we become sure that the current breakpoint exception is for kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
        unsigned long spsr = regs->pstate;

        if (mask)
                spsr |= PSR_D_BIT;
        else
                spsr &= ~PSR_D_BIT;

        regs->pstate = spsr;
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could fire
 * between the exception return and the start of the out-of-line single
 * step, and we would then wrongly single-step into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        kcb->saved_irqflag = regs->pstate;
        regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        if (kcb->saved_irqflag & PSR_I_BIT)
                regs->pstate |= PSR_I_BIT;
        else
                regs->pstate &= ~PSR_I_BIT;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
        kcb->ss_ctx.ss_pending = true;
        kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
        kcb->ss_ctx.ss_pending = false;
        kcb->ss_ctx.match_addr = 0;
}
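
/*
 * Arrange for the probed instruction to be executed: either by hardware
 * single step out of the xol slot, or by software simulation when the
 * instruction could not be stepped out of line.
 */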
static void __kprobes setup_singlestep(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb, int reenter)
{
        unsigned long slot;

        if (reenter) {
                save_previous_kprobe(kcb);
                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_REENTER;
        } else {
                kcb->kprobe_status = KPROBE_HIT_SS;
        }

        if (p->ainsn.api.insn) {
                /* prepare for single stepping */
                slot = (unsigned long)p->ainsn.api.insn;

                set_ss_context(kcb, slot);      /* mark pending ss */

                spsr_set_debug_flag(regs, 0);

                /* IRQs and single stepping do not mix well. */
                kprobes_save_local_irqflag(kcb, regs);
                kernel_enable_single_step(regs);
                instruction_pointer_set(regs, slot);
        } else {
                /* insn simulation */
                arch_simulate_insn(p, regs);
        }
}
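
/*
 * A breakpoint was hit while another kprobe was already active, e.g. from
 * a pre/post handler. Step the new probe, or die if the recorded state
 * says this cannot legitimately happen.
 */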
static int __kprobes reenter_kprobe(struct kprobe *p,
                                    struct pt_regs *regs,
                                    struct kprobe_ctlblk *kcb)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                setup_singlestep(p, regs, kcb, 1);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                pr_warn("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
                break;
        default:
                WARN_ON(1);
                return 0;
        }

        return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();

        if (!cur)
                return;

        /* return addr restore if non-branching insn */
        if (cur->ainsn.api.restore != 0)
                instruction_pointer_set(regs, cur->ainsn.api.restore);

        /* restore back original saved kprobe variables and continue */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                return;
        }
        /* call post handler */
        kcb->kprobe_status = KPROBE_HIT_SSDONE;
        if (cur->post_handler) {
                /*
                 * post_handler can hit a breakpoint and single step
                 * again, so we enable the D-flag for recursive exception.
                 */
                cur->post_handler(cur, regs, 0);
        }

        reset_current_kprobe();
}
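
/*
 * Called from the fault handling code when a fault is taken while a
 * kprobe is active, e.g. when the single-stepped instruction or a
 * user-supplied handler faults.
 */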
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the ip points back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                instruction_pointer_set(regs, (unsigned long) cur->addr);
                if (!instruction_pointer(regs))
                        BUG();

                kernel_disable_single_step();

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();

                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting,
                 * we can also use npre/npostfault count for accounting
                 * these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page_fault, this could happen
                 * if handler tries to access user space by
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;
        }
        return 0;
}
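
/*
 * Entry point from the BRK exception: figure out which kprobe (if any)
 * owns the breakpoint at the faulting PC and run its handlers.
 */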
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p, *cur_kprobe;
        struct kprobe_ctlblk *kcb;
        unsigned long addr = instruction_pointer(regs);

        kcb = get_kprobe_ctlblk();
        cur_kprobe = kprobe_running();

        p = get_kprobe((kprobe_opcode_t *) addr);

        if (p) {
                if (cur_kprobe) {
                        if (reenter_kprobe(p, regs, kcb))
                                return;
                } else {
                        /* Probe hit */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing. If we have a
                         * pre-handler and it returned non-zero, it will
                         * modify the execution path and there is no need
                         * for single stepping. Let's just reset the current
                         * kprobe and exit.
                         *
                         * pre_handler can hit a breakpoint and can step
                         * through before returning, so keep the PSTATE
                         * D-flag enabled until pre_handler returns.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
                        else
                                reset_current_kprobe();
                }
        }
        /*
         * The breakpoint instruction was removed right
         * after we hit it. Another cpu has removed
         * either a probepoint or a debugger breakpoint
         * at this address. In either case, no further
         * handling of this interrupt is appropriate.
         * Return back to original instruction, and continue.
         */
}

static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
        if ((kcb->ss_ctx.ss_pending)
            && (kcb->ss_ctx.match_addr == addr)) {
                clear_ss_context(kcb);  /* clear pending ss */
                return DBG_HOOK_HANDLED;
        }
        /* not ours, kprobes should ignore it */
        return DBG_HOOK_ERROR;
}

static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        int retval;

        /* return error if this is not our step */
        retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

        if (retval == DBG_HOOK_HANDLED) {
                kprobes_restore_local_irqflag(kcb, regs);
                kernel_disable_single_step();

                post_kprobe_handler(kcb, regs);
        }

        return retval;
}

static struct step_hook kprobes_step_hook = {
        .fn = kprobe_single_step_handler,
};

static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
        kprobe_handler(regs);
        return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
        .imm = KPROBES_BRK_IMM,
        .fn = kprobe_breakpoint_handler,
};

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
        int ret;

        ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
                                        (unsigned long)__entry_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
                                        (unsigned long)__irqentry_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
                                        (unsigned long)__exception_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
                                        (unsigned long)__idmap_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
                                        (unsigned long)__hyp_text_end);
        if (ret || is_kernel_in_hyp_mode())
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
                                        (unsigned long)__hyp_idmap_text_end);
        return ret;
}
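
/*
 * Called from kretprobe_trampoline when a probed function returns: find
 * the instance(s) for the current task, run the registered return
 * handlers, and hand back the real return address.
 */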
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address =
                (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * return probes installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always pushed into the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the (chronologically) first instance's ret_addr
         *       will be the real return address, and all the rest will
         *       point to kretprobe_trampoline.
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                        __this_cpu_write(current_kprobe, NULL);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }

        return (void *)orig_ret_address;
}
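
/*
 * Hijack the return path of the probed function by saving the original
 * return address and pointing the link register (x30) at the trampoline.
 */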
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

        /* replace return addr (x30) with trampoline */
        regs->regs[30] = (long)&kretprobe_trampoline;
}
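
/*
 * arm64 does not install a kprobe of its own on the kretprobe trampoline
 * (returns are intercepted from assembly instead), so no probe is ever an
 * arch trampoline probe.
 */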
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}

int __init arch_init_kprobes(void)
{
        register_kernel_break_hook(&kprobes_break_hook);
        register_kernel_step_hook(&kprobes_step_hook);

        return 0;
}
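
/*
 * For reference, a minimal (hypothetical) user of the machinery above,
 * built against the generic kprobes API from a module:
 *
 *      static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *      {
 *              pr_info("hit %pS\n", (void *)instruction_pointer(regs));
 *              return 0;       // continue with single-step/simulation
 *      }
 *
 *      static struct kprobe kp = {
 *              .symbol_name = "kernel_clone",  // example target only
 *              .pre_handler = my_pre,
 *      };
 *
 *      register_kprobe(&kp);
 *
 * arch_prepare_kprobe() vets and decodes the target, arch_arm_kprobe()
 * plants the BRK, and kprobe_handler()/kprobe_single_step_handler() run
 * the handlers when the probe fires.
 */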