#include <linux/highmem.h>
#include <linux/kdebug.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/uprobes.h>

#include <asm/branch.h>
#include <asm/cpu-features.h>
#include <asm/ptrace.h>

static inline int insn_has_delay_slot(const union mips_instruction insn)
{
	switch (insn.i_format.opcode) {
	/*
	 * jr and jalr are in r_format format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
		case jr_op:
			return 1;
		}
		break;

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
		case bgez_op:
		case bgezl_op:
		case bltzal_op:
		case bltzall_op:
		case bgezal_op:
		case bgezall_op:
			return 1;
		}
		break;

	/*
	 * These are unconditional and in j_format.
	 */
	case jal_op:
	case j_op:
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op:	/* not really i_format */
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:

	/*
	 * And now the FPA/cp1 branch instructions.
	 */
	case cop1_op:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op:	/* This is bbit0 on Octeon */
	case ldc2_op:	/* This is bbit032 on Octeon */
	case swc2_op:	/* This is bbit1 on Octeon */
	case sdc2_op:	/* This is bbit132 on Octeon */
#endif
		return 1;
	}

	return 0;
}
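
/*
 * A probed branch or jump cannot simply be stepped out of line on its
 * own: MIPS executes the instruction in the branch's delay slot before
 * the branch takes effect.  The predicate above therefore drives two
 * decisions below: arch_uprobe_analyze_insn() copies the delay-slot
 * instruction (rather than the branch) into the XOL slot, and
 * arch_uprobe_pre_xol() emulates the branch itself via
 * __compute_return_epc_for_insn() to work out where to resume.
 */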

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @mm: the probed address space.
 * @arch_uprobe: the probepoint information.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
	struct mm_struct *mm, unsigned long addr)
{
	union mips_instruction inst;

	/*
	 * For the time being this also blocks attempts to use uprobes with
	 * MIPS16 and microMIPS.
	 */
	if (addr & 0x03)
		return -EINVAL;

	inst.word = aup->insn[0];
	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;	/* XOL break */

	return 0;
}
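
/*
 * Resulting XOL slot layout (a sketch; aup->insn[] holds the probed
 * instruction and, when that is a branch, the instruction from its
 * delay slot):
 *
 *	ixol[0]	instruction to single-step (probed insn, or its
 *		delay-slot insn when the probed insn is a branch)
 *	ixol[1]	UPROBE_BRK_UPROBE_XOL, a break that traps back into
 *		the kernel once ixol[0] has been executed
 */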

/**
 * is_trap_insn - check if the instruction is a trap variant
 * @insn: instruction to be checked.
 * Returns true if @insn is a trap variant.
 *
 * This definition overrides the weak definition in kernel/events/uprobes.c
 * and is needed for the case where an architecture has multiple trap
 * instructions (like PowerPC or MIPS).  We treat BREAK just like the more
 * modern conditional trap instructions.
 */
bool is_trap_insn(uprobe_opcode_t *insn)
{
	union mips_instruction inst;

	inst.word = *insn;

	switch (inst.i_format.opcode) {
	case spec_op:
		switch (inst.r_format.func) {
		case break_op:
		case teq_op:
		case tge_op:
		case tgeu_op:
		case tlt_op:
		case tltu_op:
		case tne_op:
			return 1;
		}
		break;

	case bcond_op:	/* Yes, really ... */
		switch (inst.u_format.rt) {
		case teqi_op:
		case tgei_op:
		case tgeiu_op:
		case tlti_op:
		case tltiu_op:
		case tnei_op:
			return 1;
		}
		break;
	}

	return 0;
}
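
/*
 * Covering every trap flavour here matters because the generic uprobes
 * layer consults is_trap_insn() both to refuse placing a probe on an
 * instruction that already traps and to decide whether a breakpoint is
 * already present at an address, so a partial list could make it
 * misinterpret an application's own break/trap instructions.
 */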

#define UPROBE_TRAP_NR	ULONG_MAX
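
/*
 * ULONG_MAX is (unsigned long)-1, a value no real exception handler
 * stores in thread.trap_nr, which is what makes it usable as a sentinel
 * in arch_uprobe_xol_was_trapped() below.
 */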

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	union mips_instruction insn;

	/*
	 * Now find the EPC at which to resume after the breakpoint has been
	 * dealt with.  This may require emulation of a branch.
	 */
	aup->resume_epc = regs->cp0_epc + 4;
	if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
		insn.word = aup->insn[0];
		__compute_return_epc_for_insn(regs, insn);
		aup->resume_epc = regs->cp0_epc;
	}

	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;
	regs->cp0_epc = current->utask->xol_vaddr;

	return 0;
}
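
/*
 * Net effect of arch_uprobe_pre_xol(): cp0_epc now points at the
 * per-task XOL slot, thread.trap_nr is parked at the UPROBE_TRAP_NR
 * sentinel, and resume_epc holds the address to continue at afterwards.
 * For an ordinary instruction that is simply the old EPC + 4; for a
 * branch the branch is emulated right here by
 * __compute_return_epc_for_insn(), so only its delay-slot instruction
 * runs out of line.  (Hypothetical example: a taken beq probed at
 * 0x00400100 with target 0x00400200 ends up with resume_epc ==
 * 0x00400200, while the delay-slot instruction is the one that gets
 * single-stepped in the slot.)
 */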

int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->cp0_epc = aup->resume_epc;

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address. It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
	if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

int arch_uprobe_exception_notify(struct notifier_block *self,
	unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* regs == NULL is a kernel bug */
	if (WARN_ON(!regs))
		return NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (!user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_UPROBE:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_UPROBE_XOL:
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	}

	return NOTIFY_DONE;
}
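
/*
 * Both notifications are expected to come from the break exception
 * handler: hitting the UPROBE_SWBP_INSN planted at the probed address
 * is reported as DIE_UPROBE and handled by uprobe_pre_sstep_notifier(),
 * while hitting the UPROBE_BRK_UPROBE_XOL break behind the XOL slot is
 * reported as DIE_UPROBE_XOL and handled by uprobe_post_sstep_notifier().
 */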

/*
 * This function gets called when the XOL instruction either gets trapped
 * or the thread has a fatal signal. Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *aup,
	struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	instruction_pointer_set(regs, utask->vaddr);
}

unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
	struct pt_regs *regs)
{
	unsigned long ra = regs->regs[31];

	/* Replace the return address with the trampoline address */
	regs->regs[31] = trampoline_vaddr;

	return ra;
}
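
/*
 * The old contents of $ra (regs[31]) are handed back to the generic
 * uretprobe code, which records the real return address and lets the
 * probed function "return" into the trampoline at trampoline_vaddr;
 * the return probe fires there before control goes back to the caller.
 */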

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 *
 * This version overrides the weak version in kernel/events/uprobes.c.
 * It is required to handle MIPS16 and microMIPS.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
	unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}
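
/*
 * For classic MIPS this is essentially what the generic weak set_swbp()
 * already does; the override exists (as noted above) so that MIPS16 and
 * microMIPS breakpoint encodings could eventually be selected here
 * instead of the single 32-bit UPROBE_SWBP_INSN word.
 */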

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 */
int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
	unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	void *kaddr;

	/* Initialize the slot */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);

	/*
	 * The MIPS version of flush_icache_range will operate safely on
	 * user space addresses and more importantly, it doesn't require a
	 * VMA argument.
	 */
	flush_icache_range(vaddr, vaddr + len);
}
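
/*
 * The slot is written through a short-lived kernel mapping but will be
 * fetched by user space from @vaddr, so the I-cache has to be brought
 * into sync with the D-cache writes above before the task is allowed to
 * single-step from the slot.
 */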

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
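
/*
 * The generic weak version subtracts UPROBE_SWBP_INSN_SIZE because on
 * some architectures (x86, for instance) the reported PC has already
 * advanced past the breakpoint.  A MIPS break exception leaves EPC
 * pointing at the break instruction itself, so the unmodified
 * instruction pointer is already the breakpoint address.
 */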

/*
 * See if the instruction can be emulated.
 * Returns true if the instruction was emulated, false otherwise.
 *
 * For now we never emulate, so this simply returns false.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	return false;
}