// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/kernel/kprobes.c
 *
 * PA-RISC kprobes implementation
 *
 * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
 */
10 #include <linux/types.h>
11 #include <linux/kprobes.h>
12 #include <linux/slab.h>
13 #include <asm/cacheflush.h>
14 #include <asm/patch.h>
16 DEFINE_PER_CPU(struct kprobe
*, current_kprobe
) = NULL
;
17 DEFINE_PER_CPU(struct kprobe_ctlblk
, kprobe_ctlblk
);
19 int __kprobes
arch_prepare_kprobe(struct kprobe
*p
)
21 if ((unsigned long)p
->addr
& 3UL)
24 p
->ainsn
.insn
= get_insn_slot();
28 memcpy(p
->ainsn
.insn
, p
->addr
,
29 MAX_INSN_SIZE
* sizeof(kprobe_opcode_t
));
35 void __kprobes
arch_remove_kprobe(struct kprobe
*p
)
40 free_insn_slot(p
->ainsn
.insn
, 0);
44 void __kprobes
arch_arm_kprobe(struct kprobe
*p
)
46 patch_text(p
->addr
, PARISC_KPROBES_BREAK_INSN
);
49 void __kprobes
arch_disarm_kprobe(struct kprobe
*p
)
51 patch_text(p
->addr
, p
->opcode
);
54 static void __kprobes
save_previous_kprobe(struct kprobe_ctlblk
*kcb
)
56 kcb
->prev_kprobe
.kp
= kprobe_running();
57 kcb
->prev_kprobe
.status
= kcb
->kprobe_status
;
60 static void __kprobes
restore_previous_kprobe(struct kprobe_ctlblk
*kcb
)
62 __this_cpu_write(current_kprobe
, kcb
->prev_kprobe
.kp
);
63 kcb
->kprobe_status
= kcb
->prev_kprobe
.status
;
66 static inline void __kprobes
set_current_kprobe(struct kprobe
*p
)
68 __this_cpu_write(current_kprobe
, p
);
71 static void __kprobes
setup_singlestep(struct kprobe
*p
,
72 struct kprobe_ctlblk
*kcb
, struct pt_regs
*regs
)
74 kcb
->iaoq
[0] = regs
->iaoq
[0];
75 kcb
->iaoq
[1] = regs
->iaoq
[1];
76 regs
->iaoq
[0] = (unsigned long)p
->ainsn
.insn
;
81 int __kprobes
parisc_kprobe_break_handler(struct pt_regs
*regs
)
84 struct kprobe_ctlblk
*kcb
;
88 kcb
= get_kprobe_ctlblk();
89 p
= get_kprobe((unsigned long *)regs
->iaoq
[0]);
92 preempt_enable_no_resched();
96 if (kprobe_running()) {
98 * We have reentered the kprobe_handler, since another kprobe
99 * was hit while within the handler, we save the original
100 * kprobes and single step on the instruction of the new probe
101 * without calling any user handlers to avoid recursive
104 save_previous_kprobe(kcb
);
105 set_current_kprobe(p
);
106 kprobes_inc_nmissed_count(p
);
107 setup_singlestep(p
, kcb
, regs
);
108 kcb
->kprobe_status
= KPROBE_REENTER
;
112 set_current_kprobe(p
);
113 kcb
->kprobe_status
= KPROBE_HIT_ACTIVE
;
115 /* If we have no pre-handler or it returned 0, we continue with
116 * normal processing. If we have a pre-handler and it returned
117 * non-zero - which means user handler setup registers to exit
118 * to another instruction, we must skip the single stepping.
121 if (!p
->pre_handler
|| !p
->pre_handler(p
, regs
)) {
122 setup_singlestep(p
, kcb
, regs
);
123 kcb
->kprobe_status
= KPROBE_HIT_SS
;
125 reset_current_kprobe();
126 preempt_enable_no_resched();
131 int __kprobes
parisc_kprobe_ss_handler(struct pt_regs
*regs
)
133 struct kprobe_ctlblk
*kcb
= get_kprobe_ctlblk();
134 struct kprobe
*p
= kprobe_running();
139 if (regs
->iaoq
[0] != (unsigned long)p
->ainsn
.insn
+4)
142 /* restore back original saved kprobe variables and continue */
143 if (kcb
->kprobe_status
== KPROBE_REENTER
) {
144 restore_previous_kprobe(kcb
);
148 /* for absolute branch instructions we can copy iaoq_b. for relative
149 * branch instructions we need to calculate the new address based on the
150 * difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
151 * modificationt because it's based on our ainsn.insn address.
155 p
->post_handler(p
, regs
, 0);
157 switch (regs
->iir
>> 26) {
159 case 0x39: /* BE,L */
162 /* for absolute branches, regs->iaoq[1] has already the right
165 regs
->iaoq
[0] = kcb
->iaoq
[1];
168 regs
->iaoq
[1] = kcb
->iaoq
[0];
169 regs
->iaoq
[1] += (regs
->iaoq
[1] - regs
->iaoq
[0]) + 4;
170 regs
->iaoq
[0] = kcb
->iaoq
[1];
173 kcb
->kprobe_status
= KPROBE_HIT_SSDONE
;
174 reset_current_kprobe();
178 static inline void kretprobe_trampoline(void)
184 static int __kprobes
trampoline_probe_handler(struct kprobe
*p
,
185 struct pt_regs
*regs
);
187 static struct kprobe trampoline_p
= {
188 .pre_handler
= trampoline_probe_handler
191 static int __kprobes
trampoline_probe_handler(struct kprobe
*p
,
192 struct pt_regs
*regs
)
194 struct kretprobe_instance
*ri
= NULL
;
195 struct hlist_head
*head
, empty_rp
;
196 struct hlist_node
*tmp
;
197 unsigned long flags
, orig_ret_address
= 0;
198 unsigned long trampoline_address
= (unsigned long)trampoline_p
.addr
;
199 kprobe_opcode_t
*correct_ret_addr
= NULL
;
201 INIT_HLIST_HEAD(&empty_rp
);
202 kretprobe_hash_lock(current
, &head
, &flags
);
205 * It is possible to have multiple instances associated with a given
206 * task either because multiple functions in the call path have
207 * a return probe installed on them, and/or more than one return
208 * probe was registered for a target function.
210 * We can handle this because:
211 * - instances are always inserted at the head of the list
212 * - when multiple return probes are registered for the same
213 * function, the first instance's ret_addr will point to the
214 * real return address, and all the rest will point to
215 * kretprobe_trampoline
217 hlist_for_each_entry_safe(ri
, tmp
, head
, hlist
) {
218 if (ri
->task
!= current
)
219 /* another task is sharing our hash bucket */
222 orig_ret_address
= (unsigned long)ri
->ret_addr
;
224 if (orig_ret_address
!= trampoline_address
)
226 * This is the real return address. Any other
227 * instances associated with this task are for
228 * other calls deeper on the call stack
233 kretprobe_assert(ri
, orig_ret_address
, trampoline_address
);
235 correct_ret_addr
= ri
->ret_addr
;
236 hlist_for_each_entry_safe(ri
, tmp
, head
, hlist
) {
237 if (ri
->task
!= current
)
238 /* another task is sharing our hash bucket */
241 orig_ret_address
= (unsigned long)ri
->ret_addr
;
242 if (ri
->rp
&& ri
->rp
->handler
) {
243 __this_cpu_write(current_kprobe
, &ri
->rp
->kp
);
244 get_kprobe_ctlblk()->kprobe_status
= KPROBE_HIT_ACTIVE
;
245 ri
->ret_addr
= correct_ret_addr
;
246 ri
->rp
->handler(ri
, regs
);
247 __this_cpu_write(current_kprobe
, NULL
);
250 recycle_rp_inst(ri
, &empty_rp
);
252 if (orig_ret_address
!= trampoline_address
)
254 * This is the real return address. Any other
255 * instances associated with this task are for
256 * other calls deeper on the call stack
261 kretprobe_hash_unlock(current
, &flags
);
263 hlist_for_each_entry_safe(ri
, tmp
, &empty_rp
, hlist
) {
264 hlist_del(&ri
->hlist
);
267 instruction_pointer_set(regs
, orig_ret_address
);
271 void __kprobes
arch_prepare_kretprobe(struct kretprobe_instance
*ri
,
272 struct pt_regs
*regs
)
274 ri
->ret_addr
= (kprobe_opcode_t
*)regs
->gr
[2];
276 /* Replace the return addr with trampoline addr. */
277 regs
->gr
[2] = (unsigned long)trampoline_p
.addr
;
280 int __kprobes
arch_trampoline_kprobe(struct kprobe
*p
)
282 return p
->addr
== trampoline_p
.addr
;
285 int __init
arch_init_kprobes(void)
287 trampoline_p
.addr
= (kprobe_opcode_t
*)
288 dereference_function_descriptor(kretprobe_trampoline
);
289 return register_kprobe(&trampoline_p
);