// SPDX-License-Identifier: GPL-2.0
/*
 * User-space Probes (UProbes) for s390
 *
 * Copyright IBM Corp. 2014
 * Author(s): Jan Willeke,
 */

#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>

#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
#include "entry.h"

#define UPROBE_TRAP_NR		UINT_MAX
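
/*
 * Called when the probe is installed: reject opcodes that cannot safely be
 * single-stepped from the execute-out-of-line (XOL) area.
 */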
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
                             unsigned long addr)
{
        return probe_is_prohibited_opcode(auprobe->insn);
}
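
/*
 * Prepare the single step over the copied instruction in the XOL slot:
 * the PER bit and the interruption code are saved so that the post/abort
 * handlers can restore them, int_code is set to a sentinel value, and the
 * PSW is pointed at current->utask->xol_vaddr.
 */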
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
                return -EINVAL;
        if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
                return -EINVAL;
        clear_pt_regs_flag(regs, PIF_PER_TRAP);
        auprobe->saved_per = psw_bits(regs->psw).per;
        auprobe->saved_int_code = regs->int_code;
        regs->int_code = UPROBE_TRAP_NR;
        regs->psw.addr = current->utask->xol_vaddr;
        set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
        update_cr_regs(current);
        return 0;
}
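
/*
 * arch_uprobe_pre_xol() set int_code to UPROBE_TRAP_NR; any other
 * interruption during the single step overwrites it, so a different value
 * means the step trapped.
 */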
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
        struct pt_regs *regs = task_pt_regs(tsk);

        if (regs->int_code != UPROBE_TRAP_NR)
                return true;
        return false;
}
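
/*
 * Check whether the instruction that was just single-stepped would have
 * raised a PER event under the user's PER control settings (single step,
 * storage alteration, or a branch event, possibly limited to the selected
 * address range).  Used by arch_uprobe_post_xol() to decide whether the
 * event must be re-reported against the original instruction address.
 */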
static int check_per_event(unsigned short cause, unsigned long control,
                           struct pt_regs *regs)
{
        if (!(regs->psw.mask & PSW_MASK_PER))
                return 0;
        /* user space single step */
        if (control == 0)
                return 1;
        /* over indication for storage alteration */
        if ((control & 0x20200000) && (cause & 0x2000))
                return 1;
        if (cause & 0x8000) {
                /* all branches */
                if ((control & 0x80800000) == 0x80000000)
                        return 1;
                /* branch into selected range */
                if (((control & 0x80800000) == 0x80800000) &&
                    regs->psw.addr >= current->thread.per_user.start &&
                    regs->psw.addr <= current->thread.per_user.end)
                        return 1;
        }
        return 0;
}
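
/*
 * Undo the single-step setup: restore the saved PER bit and interruption
 * code, rewrite the PSW address and (for branch-and-save style instructions)
 * the return register from the XOL slot back to the original instruction
 * address, and re-inject a pending PER event with the corrected address.
 */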
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        int fixup = probe_get_fixup_type(auprobe->insn);
        struct uprobe_task *utask = current->utask;

        clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
        update_cr_regs(current);
        psw_bits(regs->psw).per = auprobe->saved_per;
        regs->int_code = auprobe->saved_int_code;

        if (fixup & FIXUP_PSW_NORMAL)
                regs->psw.addr += utask->vaddr - utask->xol_vaddr;
        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (auprobe->insn[0] & 0xf0) >> 4;

                regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
        }
        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(auprobe->insn[0] >> 8);

                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
        if (check_per_event(current->thread.per_event.cause,
                            current->thread.per_user.control, regs)) {
                /* fix per address */
                current->thread.per_event.address = utask->vaddr;
                /* trigger per event */
                set_pt_regs_flag(regs, PIF_PER_TRAP);
        }
        return 0;
}
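
/*
 * Notifier called from the s390 trap handlers: breakpoint traps are fed to
 * the uprobe breakpoint handler, single-step traps to the post-single-step
 * handler.  Events raised in kernel mode or during a transaction are ignored.
 */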
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
                                 void *data)
{
        struct die_args *args = data;
        struct pt_regs *regs = args->regs;

        if (!user_mode(regs))
                return NOTIFY_DONE;
        if (regs->int_code & 0x200) /* Trap during transaction */
                return NOTIFY_DONE;
        switch (val) {
        case DIE_BPT:
                if (uprobe_pre_sstep_notifier(regs))
                        return NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (uprobe_post_sstep_notifier(regs))
                        return NOTIFY_STOP;
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}
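
/*
 * Abort a single step that did not complete: restore the saved interruption
 * code and point the PSW (and any recorded PER address) back at the original
 * probed instruction.
 */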
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
        regs->int_code = auprobe->saved_int_code;
        regs->psw.addr = current->utask->vaddr;
        current->thread.per_event.address = current->utask->vaddr;
}
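
/*
 * On s390 the return address of a function call lives in general register 14,
 * so a uretprobe is armed by swapping that register for the trampoline
 * address and remembering the original value.
 */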
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
                                                struct pt_regs *regs)
{
        unsigned long orig;

        orig = regs->gprs[14];
        regs->gprs[14] = trampoline;
        return orig;
}
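
/*
 * A return instance is considered alive while the user stack pointer is
 * still at or below the stack value recorded when the return address was
 * hijacked (the stack grows downwards); for a chained call the comparison
 * allows equality.
 */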
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
                             struct pt_regs *regs)
{
        if (ctx == RP_CHECK_CHAIN_CALL)
                return user_stack_pointer(regs) <= ret->stack;
        else
                return user_stack_pointer(regs) < ret->stack;
}
/* Instruction Emulation */

static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
        psw->addr = __rewind_psw(*psw, -len);
}
#define EMU_ILLEGAL_OP		1
#define EMU_SPECIFICATION	2
#define EMU_ADDRESSING		3
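
/*
 * The emu_*_ril() macros perform the memory access of an emulated RIL
 * instruction on behalf of user space: they require the general-instructions
 * extension facility (facility bit 34), check the natural alignment of the
 * target address, and map failures to the EMU_* codes above so that
 * handle_insn_ril() can raise the matching program check.
 */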
#define emu_load_ril(ptr, output)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	if (__rc == 0)					\
		*(output) = input;			\
	__rc;						\
})
#define emu_store_ril(regs, ptr, input)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(ptr) __ptr = (ptr);			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)__ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (put_user(*(input), __ptr))		\
		__rc = EMU_ADDRESSING;			\
	if (__rc == 0)					\
		sim_stor_event(regs,			\
			       (void __force *)__ptr,	\
			       mask + 1);		\
	__rc;						\
})
#define emu_cmp_ril(regs, ptr, cmp)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else if (input > *(cmp))			\
		psw_bits((regs)->psw).cc = 1;		\
	else if (input < *(cmp))			\
		psw_bits((regs)->psw).cc = 2;		\
	else						\
		psw_bits((regs)->psw).cc = 0;		\
	__rc;						\
})
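
/*
 * Layout of a RIL-format instruction (opcode, register operand, opcode
 * extension, 32-bit signed displacement counted in halfwords) and a helper
 * union that lets the emulation address the 64/32/16-bit sub-fields of a
 * general register.
 */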
struct insn_ril {
        u8 opc0;
        u8 reg  : 4;
        u8 opc1 : 4;
        s32 disp;
} __packed;

union split_register {
        u64 u64;
        u32 u32[2];
        u16 u16[4];
        s64 s64;
        s32 s32[2];
        s16 s16[4];
};
/*
 * If the user PER registers are set up to trace storage alterations and an
 * emulated store took place at a matching address, a user trap is generated.
 */
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
{
        if (!(regs->psw.mask & PSW_MASK_PER))
                return;
        if (!(current->thread.per_user.control & PER_EVENT_STORE))
                return;
        if ((void *)current->thread.per_user.start > (addr + len))
                return;
        if ((void *)current->thread.per_user.end < addr)
                return;
        current->thread.per_event.address = regs->psw.addr;
        current->thread.per_event.cause = PER_EVENT_STORE >> 16;
        set_pt_regs_flag(regs, PIF_PER_TRAP);
}
/*
 * PC-relative instructions are emulated, since their operands may not be
 * reachable from the XOL area due to the limited displacement range.
 */
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        union split_register *rx;
        struct insn_ril *insn;
        unsigned int ilen;
        void *uptr;
        int rc = 0;

        insn = (struct insn_ril *) &auprobe->insn;
        rx = (union split_register *) &regs->gprs[insn->reg];
        uptr = (void *)(regs->psw.addr + (insn->disp * 2));
        ilen = insn_length(insn->opc0);

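        /*
         * Dispatch on the instruction family handled here: 0xc0 (larl),
         * 0xc4 (relative-long loads and stores) and 0xc6 (relative-long
         * compares and pfdrl).
         */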
        switch (insn->opc0) {
        case 0xc0:
                switch (insn->opc1) {
                case 0x00: /* larl */
                        rx->u64 = (unsigned long)uptr;
                        break;
                }
                break;
        case 0xc4:
                switch (insn->opc1) {
                case 0x02: /* llhrl */
                        rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x04: /* lghrl */
                        rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
                        break;
                case 0x05: /* lhrl */
                        rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x06: /* llghrl */
                        rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
                        break;
                case 0x08: /* lgrl */
                        rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0c: /* lgfrl */
                        rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
                        break;
                case 0x0d: /* lrl */
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x0e: /* llgfrl */
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
                        rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
                        rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
                        rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
        case 0xc6:
                switch (insn->opc1) {
                case 0x02: /* pfdrl */
                        if (!test_facility(34))
                                rc = EMU_ILLEGAL_OP;
                        break;
                case 0x04: /* cghrl */
                        rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
                        break;
                case 0x05: /* chrl */
                        rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
                        break;
                case 0x06: /* clghrl */
                        rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* clhrl */
                        rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x08: /* cgrl */
                        rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
                        break;
                case 0x0a: /* clgrl */
                        rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0c: /* cgfrl */
                        rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
                        break;
                case 0x0d: /* crl */
                        rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
                        break;
                case 0x0e: /* clgfrl */
                        rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* clrl */
                        rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
        }
        adjust_psw_addr(&regs->psw, ilen);
        switch (rc) {
        case EMU_ILLEGAL_OP:
                regs->int_code = ilen << 16 | 0x0001;
                do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
                break;
        case EMU_SPECIFICATION:
                regs->int_code = ilen << 16 | 0x0006;
                do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
                break;
        case EMU_ADDRESSING:
                regs->int_code = ilen << 16 | 0x0005;
                do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
                break;
        }
}
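
/*
 * Decide whether the single step in the XOL area can be skipped: in 24-bit
 * mode (or in 31-bit mode outside a compat task) the breakpoint is reported
 * as an illegal-address trap instead, and relative-long instructions are
 * emulated directly by handle_insn_ril().
 */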
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
            ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) &&
             !is_compat_task())) {
                regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
                do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
                return true;
        }
        if (probe_is_insn_relative_long(auprobe->insn)) {
                handle_insn_ril(auprobe, regs);