/* arch/x86/kernel/ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>

#include "tls.h"
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
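
/*
 * Example (sketch): name-based register access, such as the kprobe event
 * parser handling a "%ax" fetch argument, is built on these lookups,
 * roughly:
 *
 *	int off = regs_query_register_offset("ax");
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 *
 * where regs_get_register() is the asm/ptrace.h accessor that reads the
 * word at that byte offset inside struct pt_regs.
 */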
static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
	[0] = offsetof(struct pt_regs, ax),
	[1] = offsetof(struct pt_regs, dx),
	[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
	[0] = offsetof(struct pt_regs, di),
	[1] = offsetof(struct pt_regs, si),
	[2] = offsetof(struct pt_regs, dx),
	[3] = offsetof(struct pt_regs, cx),
	[4] = offsetof(struct pt_regs, r8),
	[5] = offsetof(struct pt_regs, r9)
#endif
};
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
			 (X86_EFLAGS_CF | X86_EFLAGS_PF |		\
			  X86_EFLAGS_AF | X86_EFLAGS_ZF |		\
			  X86_EFLAGS_SF | X86_EFLAGS_TF |		\
			  X86_EFLAGS_DF | X86_EFLAGS_OF |		\
			  X86_EFLAGS_RF | X86_EFLAGS_AC))
/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32
/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps. The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range. In this
 * case we try to take the previous stack. To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	u32 *prev_esp;

	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	/* The previous stack pointer is saved at the base of the stack. */
	prev_esp = (u32 *)(context);
	if (*prev_esp)
		return (unsigned long)*prev_esp;

	return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);
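
/*
 * The prev_esp slot consulted above is expected to be filled in by the
 * 32-bit stack-switching code (for example the irq stack handling in
 * arch/x86/kernel/irq_32.c), which saves the previous stack pointer at
 * the base of the stack it switches to.
 */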
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}
#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}
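
/*
 * In the regset ->get()/->set() convention used above and below, @pos is a
 * byte offset into the exported register block (here struct
 * user_regs_struct) and @count is a byte count; the regset core supplies
 * either a kernel buffer (@kbuf) or a user buffer (@ubuf), not both.
 */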
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}
/*
 * Walk through every ptrace breakpoint for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}
static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
					int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}
static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
				unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
}
static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
					int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}
/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp = thread->ptrace_bps[n];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}
/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->debugreg6 = val;
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}
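
/*
 * For illustration: a debugger typically programs a hardware watchpoint by
 * poking the virtual debug registers in the USER area, along the lines of
 * (user-space sketch, error handling omitted):
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[0]), watch_addr);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[7]), dr7_enable_bits);
 *
 * The first write lands in ptrace_set_breakpoint_addr(), the second in
 * ptrace_write_dr7() above.
 */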
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
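
/*
 * For illustration, the common debugger path through the code above
 * (user-space sketch, field names as exported by the kernel's
 * struct user_regs_struct):
 *
 *	struct user_regs_struct regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	long ip = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user_regs_struct, ip), NULL);
 *
 * PTRACE_GETREGS is served through the REGSET_GENERAL regset defined
 * below, while PTRACE_PEEKUSER for an offset inside user_regs_struct
 * ends up in getreg().
 */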
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}
#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}
#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}
static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
			     compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif /* CONFIG_IA32_EMULATION */
#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area.  Only allow
	   to return the lower 32bits of segment and debug registers.  */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area.  Only allow
	   to update segment and debug registers with the upper 32bits
	   zero-extended. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
#ifdef CONFIG_X86_X32_ABI
	if (!is_ia32_task())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif
#ifdef CONFIG_IA32_EMULATION
	return ia32_arch_ptrace(child, request, caddr, cdata);
#else
	return 0;
#endif
}
#endif	/* CONFIG_COMPAT */
#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};
#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#endif	/* CONFIG_X86_64 */
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif
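
/*
 * These user_regset_view tables are consumed in two places: the ELF core
 * dump code emits one note per regset (keyed by .core_note_type, e.g.
 * NT_PRSTATUS or NT_X86_XSTATE), and PTRACE_GETREGSET/PTRACE_SETREGSET
 * select a regset by the same NT_* value. For example, a debugger can
 * read the xsave area with (user-space sketch):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov);
 *
 * where buf must be large enough for the kernel's xstate image and
 * iov_len is updated to the amount actually copied.
 */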
/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
static void fill_sigtrap_info(struct task_struct *tsk,
				struct pt_regs *regs,
				int error_code, int si_code,
				struct siginfo *info)
{
	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = si_code;
	info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
					 int error_code, int si_code)
{
	struct siginfo info;

	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}