// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *   Copyright (C) IBM Corporation, 2002, 2004
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>

#include "probes-common.h"
static const union mips_instruction breakpoint_insn = {
        .b_format = {
                .opcode = spec_op,
                .code = BRK_KPROBE_BP,
                .func = break_op
        }
};

static const union mips_instruction breakpoint2_insn = {
        .b_format = {
                .opcode = spec_op,
                .code = BRK_KPROBE_SSTEPBP,
                .func = break_op
        }
};
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
        return __insn_has_delay_slot(insn);
}
/*
 * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
 * one; putting a breakpoint on top of an atomic ll/sc pair is a bad
 * idea, so we refuse kprobe insertion for such instructions; we cannot
 * do much about a breakpoint in the middle of an ll/sc pair; it is up
 * to the user to avoid those places
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
        int ret = 0;

        switch (insn.i_format.opcode) {
        case ll_op:
        case lld_op:
        case sc_op:
        case scd_op:
                ret = 1;
                break;
        default:
                break;
        }
        return ret;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        union mips_instruction insn;
        union mips_instruction prev_insn;
        int ret = 0;

        insn = p->addr[0];

        if (insn_has_ll_or_sc(insn)) {
                pr_notice("Kprobes for ll and sc instructions are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if ((probe_kernel_read(&prev_insn, p->addr - 1,
                               sizeof(mips_instruction)) == 0) &&
            insn_has_delayslot(prev_insn)) {
                pr_notice("Kprobes for branch delayslot are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if (__insn_is_compact_branch(insn)) {
                pr_notice("Kprobes for compact branches are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        /* insn: must be on special executable page on mips. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * In the kprobe->ainsn.insn[] array we store the original
         * instruction at index zero and a break trap instruction at
         * index one.
         *
         * On MIPS, if the instruction at the probed address is a branch,
         * we need to execute the instruction in its Branch Delayslot (BD)
         * at the time of the probe hit. As MIPS also doesn't have single
         * stepping support, the BD instruction cannot be executed in-line;
         * it is executed on the SSOL slot using a normal breakpoint
         * instruction in the next slot. So, read the instruction and save
         * it for later execution.
         */
        if (insn_has_delayslot(insn))
                memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
        else
                memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

        p->ainsn.insn[1] = breakpoint2_insn;
        p->opcode = *p->addr;

out:
        return ret;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = breakpoint_insn;
        flush_insn_slot(p);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flush_insn_slot(p);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
        kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
        kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
        kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
        kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}
static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                               struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
        kcb->kprobe_saved_epc = regs->cp0_epc;
}
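
/*
 * Only the ST0_IE (interrupt enable) bit of cp0_status is saved above;
 * prepare_singlestep() clears it so the step cannot be interrupted, and
 * the post/fault handlers OR the saved value back in afterwards.
 */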
/*
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during probe hit.
 * The result of evaluation would be the updated epc. The instruction in
 * the delayslot would actually be single stepped (using a normal
 * breakpoint) on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use,
 * in case we need to execute the delayslot instruction. The latter will
 * be false for the NOP instruction in the delayslot and for branch-likely
 * instructions when the branch is taken. For those cases we set the
 * SKIP_DELAYSLOT flag in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb)
{
        union mips_instruction insn = p->opcode;
        long epc;
        int ret = 0;

        epc = regs->cp0_epc;
        if (epc & 3)
                goto unaligned;
        if (p->ainsn.insn->word == 0)
                kcb->flags |= SKIP_DELAYSLOT;
        else
                kcb->flags &= ~SKIP_DELAYSLOT;

        ret = __compute_return_epc_for_insn(regs, insn);
        if (ret < 0)
                return ret;

        if (ret == BRANCH_LIKELY_TAKEN)
                kcb->flags |= SKIP_DELAYSLOT;

        kcb->target_epc = regs->cp0_epc;

        return 0;
222 pr_notice("%s: unaligned epc - sending SIGBUS.\n", current
->comm
);
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                               struct kprobe_ctlblk *kcb)
{
        int ret = 0;

        regs->cp0_status &= ~ST0_IE;

        /* single step inline if the instruction is a break */
        if (p->opcode.word == breakpoint_insn.word ||
            p->opcode.word == breakpoint2_insn.word)
                regs->cp0_epc = (unsigned long)p->addr;
        else if (insn_has_delayslot(p->opcode)) {
                ret = evaluate_branch_instruction(p, regs, kcb);
                if (ret < 0) {
                        pr_notice("Kprobes: Error in evaluating branch\n");
                        return;
                }
        }
        regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "break 0"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap. In case of branch instructions, the target
 * epc is restored instead.
 */
static void __kprobes resume_execution(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb)
{
        if (insn_has_delayslot(p->opcode))
                regs->cp0_epc = kcb->target_epc;
        else {
                unsigned long orig_epc = kcb->kprobe_saved_epc;
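                /* every classic MIPS instruction is 4 bytes long */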
                regs->cp0_epc = orig_epc + 4;
        }
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr;
        struct kprobe_ctlblk *kcb;

        addr = (kprobe_opcode_t *) regs->cp0_epc;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                            p->ainsn.insn->word == breakpoint_insn.word) {
                                regs->cp0_status &= ~ST0_IE;
                                regs->cp0_status |= kcb->kprobe_saved_SR;
                                goto no_kprobe;
                        }
                        /*
                         * We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * Here we save the original kprobe variables and
                         * just single step on the instruction of the new
                         * probe without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        prepare_singlestep(p, regs, kcb);
                        kcb->kprobe_status = KPROBE_REENTER;
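                        /*
                         * SKIP_DELAYSLOT set means the branch was fully
                         * evaluated and there is nothing to single step,
                         * so finish the reentered probe right here.
                         */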
                        if (kcb->flags & SKIP_DELAYSLOT) {
                                resume_execution(p, regs, kcb);
                                restore_previous_kprobe(kcb);
                                preempt_enable_no_resched();
                        }
                        return 1;
                } else if (addr->word != breakpoint_insn.word) {
                        /*
                         * The breakpoint instruction was removed by
                         * another cpu right after we hit, no further
                         * handling of this interrupt is appropriate
                         */
                        ret = 1;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (addr->word != breakpoint_insn.word) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
                reset_current_kprobe();
                preempt_enable_no_resched();
                return 1;
        }
        prepare_singlestep(p, regs, kcb);
        if (kcb->flags & SKIP_DELAYSLOT) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                if (p->post_handler)
                        p->post_handler(p, regs, 0);
                resume_execution(p, regs, kcb);
                preempt_enable_no_resched();
        } else
                kcb->kprobe_status = KPROBE_HIT_SS;

        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
static inline int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;
        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        regs->cp0_status |= kcb->kprobe_saved_SR;

        /* Restore the original saved kprobe variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                return 1;
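
        /*
         * If the fault happened while single stepping the copied
         * instruction, unwind the single-step state so the fault is
         * handled in the context of the probed instruction.
         */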
        if (kcb->kprobe_status & KPROBE_HIT_SS) {
                resume_execution(cur, regs, kcb);
                regs->cp0_status |= kcb->kprobe_old_SR;

                reset_current_kprobe();
                preempt_enable_no_resched();
        }
        return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_BREAK:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEPBP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();

                if (kprobe_running()
                    && kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(
                ".set push\n\t"
                /* Keep the assembler from reordering and placing JR here. */
                ".set noreorder\n\t"
                "nop\n\t"
                ".global kretprobe_trampoline\n"
                "kretprobe_trampoline:\n\t"
                "nop\n\t"
                ".set pop"
                : : : "memory");
}
void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
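        /* regs[31] is $ra, the MIPS return address register */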
        ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

        /* Replace the return addr with trampoline addr */
        regs->regs[31] = (unsigned long)kretprobe_trampoline;
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                              struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have a
         * return probe installed on them, and/or more than one return probe
         * was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        instruction_pointer(regs) = orig_ret_address;

        kretprobe_hash_unlock(current, &flags);
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
                return 1;

        return 0;
}
static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *)kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}
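
/*
 * Usage sketch (illustrative only, not compiled here): the support above
 * is what makes the generic kprobes API work on MIPS. A minimal module
 * probe looks like the following; the probed symbol is an arbitrary
 * example.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %p\n", p->addr);
 *		return 0;	// let the single-step machinery above run
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_exit",
 *		.pre_handler = handler_pre,
 *	};
 *
 *	// register_kprobe() ends up in arch_prepare_kprobe() and
 *	// arch_arm_kprobe() above; unregister_kprobe() disarms the probe.
 *	register_kprobe(&kp);
 *	...
 *	unregister_kprobe(&kp);
 */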