/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};
/*
 * Does not yet catch signals sent when the child dies;
 * that case is handled in exit.c and signal.c.
 */
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))
/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
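/*
 * Worked example (editor's note, not in the original source): the low
 * two bits of a selector are its RPL, and user selectors must carry
 * USER_RPL (3).  So 0x2b (RPL 3) and the null selector pass, while a
 * kernel selector such as 0x10 (RPL 0) is rejected:
 *
 *	invalid_selector(0x2b) == false		(0x2b & 3 == USER_RPL)
 *	invalid_selector(0)    == false		(null selector allowed here)
 *	invalid_selector(0x10) == true		(0x10 & 3 != USER_RPL)
 */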
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}
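/*
 * Editor's note: this works because struct user_regs_struct (the
 * 32-bit ptrace ABI layout) and struct pt_regs order the registers
 * identically, four bytes per slot, so a byte offset into
 * user_regs_struct shifted right by two indexes straight into
 * pt_regs.  The BUILD_BUG_ON above pins the shared first member, bx.
 */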
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	u16 retval;

	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}
static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}
#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}
static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE_MAX - 7;
}
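/*
 * Editor's note on the constants above: a breakpoint address must
 * leave room for the largest watched object below the limit, i.e.
 * 4 bytes for 32-bit tasks (hence "- 3") and 8 bytes in 64-bit mode
 * (hence "- 7"), so a watchpoint can never extend past the end of
 * the user address space.
 */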
#endif	/* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}
static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure TF stays set if we (the debugger) had set it.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
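/*
 * Example (editor's illustration): if the tracee's flags are 0x246
 * (IF | ZF | PF plus the always-set bit 1) and the debugger writes
 * 0x1 (CF), only FLAG_MASK bits are taken from the new value:
 * (0x246 & ~FLAG_MASK) | (0x1 & FLAG_MASK) == 0x203.  CF is set, ZF
 * and PF are dropped, and the kernel-only IF bit survives untouched.
 */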
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}
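/*
 * Editor's note: debug registers 4 and 5 are reserved (they only
 * alias DR6/DR7 when CR4.DE is clear), so they read back as 0 here
 * and are rejected with -EIO by ptrace_set_debugreg() below.
 */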
static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554
		 * (0x4444 | 0x1110).
		 * See the AMD manual no. 24593 (AMD64 System Programming).
		 */
#ifdef CONFIG_X86_32
#define	DR7_MASK	0x5f54
#else
#define	DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
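/*
 * Worked example (editor's illustration): a 4-byte write watchpoint
 * programs R/W = 01b and LEN = 11b in DR7, giving the half-byte
 * check = 1101b = 0xd, and (0x5f54 >> 0xd) & 1 == 0, so it is
 * accepted.  An I/O breakpoint (R/W = 10b, check = 0x2) hits
 * (0x5f54 >> 0x2) & 1 == 1 and is rejected with -EIO.
 */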
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}
#ifdef CONFIG_X86_PTRACE_BTS
/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct for the duration of ptrace operations,
 * but since destruction is deferred, it may be executed after both
 * tracer and tracee exited.
 */
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	void			*buffer;
	unsigned int		size;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};
static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

 out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}
static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}
static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}
static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}
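/*
 * Editor's illustration of the index math above: with n == 8 records
 * of size 24 and top == begin + 3*24, index 0 reads the record just
 * below top (begin + 2*24), while index 4 computes begin - 2*24,
 * underflows begin, and wraps to begin + 6*24.  The buffer is thus
 * treated as a ring with the newest record at top - size.
 */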
static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}
static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		/* Overflow signals are not supported. */
		if (!cfg.signal)
			return -EINVAL;

		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}
static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}
static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}
static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}
/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break
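/*
 * Editor's note: for instance, R32(eip, ip) expands to
 *
 *	case offsetof(struct user32, regs.eip):
 *		regs->ip = value; break;
 *
 * mapping a 32-bit user_regs offset onto the 64-bit pt_regs field.
 */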
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}
#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are read as 0.
		 */
		*val = 0;
		break;
	}
	return 0;
}
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}
static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}
#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif
/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}