// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
 */
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

#define UPROBE_INV_FAULT_CODE	UINT_MAX

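/*
 * Copy the original instruction into the task's execute-out-of-line (xol)
 * slot and keep the instruction cache coherent with the slot contents.
 */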
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/*
	 * Initial cache maintenance of the xol page done via set_pte_at().
	 * Subsequent CMOs only needed if the xol slot changes.
	 */
	if (!memcmp(dst, src, len))
		goto done;

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);

done:
	kunmap_atomic(xol_page_kaddr);
}

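/*
 * On arm64 the BRK exception leaves the PC at the breakpoint instruction
 * itself, so the software-breakpoint address is simply the current
 * instruction pointer.
 */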
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

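/*
 * Called when a uprobe is installed: check that the probe site is a valid
 * AArch64 instruction address and decode the instruction to decide whether
 * it must be single-stepped out-of-line or can be simulated.
 */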
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
		unsigned long addr)
{
	u32 insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -EINVAL;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = le32_to_cpu(auprobe->insn);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}

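/*
 * Arrange for the copied instruction to be single-stepped from the xol slot.
 */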
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if ol insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the instruction pointer at the xol slot to execute out-of-line */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}

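/*
 * Single-step of the xol slot has completed: move the PC past the breakpoint
 * and turn single-stepping back off.
 */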
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Point the instruction pointer at the instruction after the breakpoint */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}

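/*
 * Report whether the instruction being stepped out-of-line took a trap of
 * its own.
 */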
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * Between arch_uprobe_pre_xol and arch_uprobe_post_xol, if an xol
	 * insn itself is trapped, then detect the case with the help of
	 * the invalid fault code which is set in arch_uprobe_pre_xol.
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}

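/*
 * If the decoder marked the instruction as simulatable, emulate it directly
 * on the saved register state instead of single-stepping it.
 */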
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	u32 insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = le32_to_cpu(auprobe->insn);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

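/*
 * Out-of-line execution is being abandoned (e.g. on a fatal signal); undo
 * the state set up by arch_uprobe_pre_xol().
 */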
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	/*
	 * If a simple branch instruction (B) was used to call the retprobed
	 * assembly label, return true even when regs->sp and ret->stack are
	 * the same. This ensures that cleanup and reporting of the return
	 * instances corresponding to the callee label are done when
	 * handle_trampoline for the called function is executed.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

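/*
 * On arm64 the return address lives in the link register, so hijacking it
 * means swapping the saved x30 for the trampoline address.
 */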
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}

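/*
 * arm64 dispatches uprobe exceptions through its own break/step hooks
 * registered below, so nothing needs to be done from the notifier chain.
 */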
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

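/* BRK #UPROBES_BRK_IMM hit in userspace: hand control to the uprobes core. */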
static int uprobe_breakpoint_handler(struct pt_regs *regs,
				     unsigned long esr)
{
	if (uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

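/* Single-step over the xol slot has finished: notify the uprobes core. */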
static int uprobe_single_step_handler(struct pt_regs *regs,
				      unsigned long esr)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
	if (uprobe_post_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
	.imm = UPROBES_BRK_IMM,
	.fn = uprobe_breakpoint_handler,
};

/* uprobe single step handler hook */
static struct step_hook uprobes_step_hook = {
	.fn = uprobe_single_step_handler,
};

static int __init arch_init_uprobes(void)
{
	register_user_break_hook(&uprobes_break_hook);
	register_user_step_hook(&uprobes_step_hook);

	return 0;
}

device_initcall(arch_init_uprobes);