// SPDX-License-Identifier: GPL-2.0+
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
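/*
 * Kernel text is patched under stop_machine(): all online CPUs rendezvous in
 * patch_text_cb(), one CPU writes the new 16-bit opcode and writes the dcache
 * back, then every CPU invalidates its icache for the patched range.
 */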
struct csky_insn_patch {
	kprobe_opcode_t	*addr;
	u32		opcode;
	atomic_t	cpu_count;
};
static int __kprobes patch_text_cb(void *priv)
{
	struct csky_insn_patch *param = priv;
	unsigned int addr = (unsigned int)param->addr;

	if (atomic_inc_return(&param->cpu_count) == 1) {
		*(u16 *) addr = cpu_to_le16(param->opcode);
		dcache_wb_range(addr, addr + 2);
		atomic_inc(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	icache_inv_range(addr, addr + 2);

	return 0;
}
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };

	return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}
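/*
 * The probed instruction is copied into the per-probe out-of-line slot;
 * 'restore' records the address of the next instruction (probe address plus
 * 2 or 4 bytes) so post_kprobe_handler() can resume execution there.
 */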
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	patch_text(p->ainsn.api.insn, p->opcode);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	post_kprobe_handler(kcb, regs);
}
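/*
 * Validate and decode the probed instruction: reject unsupported encodings,
 * mark those that must be simulated (no slot), and copy the rest into an
 * out-of-line single-step slot.
 */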
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x1) {
		pr_warn("Address not aligned.\n");
		return -EINVAL;
	}

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	/* decode instruction */
	switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, USR_BKPT);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
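/*
 * A kprobe handler may itself hit another probe; the current kprobe and its
 * status are stashed in the control block so the outer probe can be restored
 * once the nested one completes.
 */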
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, there is a chance of an
 * interrupt occurring between the exception return and the start of the
 * out-of-line single-step, which would result in wrongly single stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_sr = regs->sr;
	regs->sr &= ~BIT(6);	/* mask interrupts: PSR.IE is bit 6 */
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->sr = kcb->saved_sr;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + offset;
}
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
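/*
 * The two trace-mode bits at position 14 of the PSR select the trace mode:
 * TRACE_MODE_RUN is normal execution, TRACE_MODE_SI is single-instruction
 * trace, used here to step the copied instruction in its out-of-line slot.
 */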
#define TRACE_MODE_SI		BIT(14)
#define TRACE_MODE_MASK		~(0x3 << 14)
#define TRACE_MODE_RUN		0
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot, p);	/* mark pending ss */

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
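/*
 * Called when a breakpoint is hit while another kprobe is already being
 * handled on this CPU: a hit from a pre/post handler is single-stepped as a
 * re-entered probe, anything else is treated as an unrecoverable state.
 */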
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
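/*
 * Runs after the single-step (or simulation) of the probed instruction:
 * restore the PC for non-branching instructions, pop a re-entered probe if
 * one was saved, otherwise invoke the post handler and clear the current
 * kprobe.
 */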
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->pc = cur->ainsn.api.restore;

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/* post_handler can hit breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long) cur->addr;
		if (!instruction_pointer(regs))
			BUG();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
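/*
 * Called when the USR_BKPT breakpoint planted by arch_arm_kprobe() is hit.
 * Returns 1 if the break belongs to a kprobe and was handled here, 0 to let
 * the generic trap handling continue.
 */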
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and no need to single
			 * stepping. Let's just reset current kprobe and exit.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return 1;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
	return 0;
}
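/*
 * Called from the trace exception once the slot instruction has been
 * single-stepped: clears the pending single-step context, restores the saved
 * PSR/trace mode and finishes the probe via post_kprobe_handler().
 */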
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
		clear_ss_context(kcb);	/* clear pending ss */

		kprobes_restore_local_irqflag(kcb, regs);
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;

		post_kprobe_handler(kcb, regs);
		return 1;
	}
	return 0;
}
/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	return ret;
}
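/*
 * Reached when a probed function returns into kretprobe_trampoline; the
 * generic helper runs the kretprobe handlers and hands back the original
 * return address so execution can continue in the real caller.
 */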
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
}
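/*
 * On function entry the return address in lr is saved in the kretprobe
 * instance and replaced with the trampoline, so that the function's return
 * lands in trampoline_probe_handler().
 */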
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
	ri->fp = NULL;
	regs->lr = (unsigned long) &kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
int __init arch_init_kprobes(void)
{
	return 0;
}
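/*
 * Usage sketch (not part of this file, for illustration only): a module
 * exercises the code above simply by registering a kprobe; the symbol and
 * handler names below are assumptions, not definitions from this file.
 *
 *	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", (void *)instruction_pointer(regs));
 *		return 0;	// 0: continue with the normal single-step path
 *	}
 *
 *	static struct kprobe demo_kp = {
 *		.symbol_name	= "kernel_clone",	// assumed probe target
 *		.pre_handler	= demo_pre,
 *	};
 *
 *	// in the module init function:
 *	//	ret = register_kprobe(&demo_kp);
 */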