arch/x86_64/kernel/ptrace.c (linux-2.6/verdex.git, blob 60dc9b98951d0f682887c461852073eca81d46d7)
/* ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * x86-64 port 2000-2002 Andi Kleen
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x44dd5UL
/* sets the trap flag. */
#define TRAP_FLAG 0x100UL

/*
 * eflags and offset of eflags on child stack..
 */
#define EFLAGS offsetof(struct pt_regs, eflags)
#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
/*
 * This routine will get a word off of the process's privileged stack.
 * The offset is how far from the base addr as stored in the TSS.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline unsigned long get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task->thread.rsp0;
	stack += offset;
	return (*((unsigned long *)stack));
}
static inline struct pt_regs *get_child_regs(struct task_struct *task)
{
	struct pt_regs *regs = (void *)task->thread.rsp0;
	return regs - 1;
}
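/*
 * Note: the stack helpers here rely on the layout convention that the
 * user-mode pt_regs frame sits at the very top of the child's kernel
 * stack, immediately below task->thread.rsp0, which is why
 * get_child_regs() simply returns "regs - 1".
 */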
/*
 * This routine will put a word on the process's privileged stack.
 * The offset is how far from the base addr as stored in the TSS.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long put_stack_long(struct task_struct *task, int offset,
	unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task->thread.rsp0;
	stack += offset;
	*(unsigned long *)stack = data;
	return 0;
}
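/*
 * Offset convention, as a short sketch (not part of the original file):
 * callers pass byte offsets relative to thread.rsp0, so registers in the
 * pt_regs frame are reached with negative offsets. For example, the
 * eflags slot that putreg() updates could be read and written with
 *
 *	unsigned long efl = get_stack_long(child, EFL_OFFSET);
 *	put_stack_long(child, EFL_OFFSET, efl | TRAP_FLAG);
 *
 * since EFL_OFFSET is offsetof(struct pt_regs, eflags) minus
 * sizeof(struct pt_regs), i.e. a negative offset from rsp0.
 */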
#define LDT_SEGMENT 4
unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->rip;
	seg = regs->cs & 0xffff;

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if (seg & LDT_SEGMENT) {
		u32 *desc;
		unsigned long base;

		down(&child->mm->context.sem);
		desc = child->mm->context.ldt + (seg & ~7);
		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

		/* 16-bit code segment? */
		if (!((desc[1] >> 22) & 1))
			addr &= 0xffff;
		addr += base;
		up(&child->mm->context.sem);
	}
	return addr;
}
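/*
 * For reference, the base-address reassembly above follows the usual x86
 * segment-descriptor layout (nothing here is specific to this file):
 *
 *	bits  0..15 of base: desc[0] >> 16
 *	bits 16..23 of base: desc[1] & 0xff
 *	bits 24..31 of base: desc[1] & 0xff000000 (already in place)
 *
 * Bit 22 of desc[1] is the default-operation-size (D/B) flag, hence the
 * 16-bit truncation of the instruction pointer when it is clear.
 */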
static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[16];
	unsigned long addr = convert_rip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf */
		case 0x9d:
			return 1;

			/* CHECKME: 64 65 */

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

		/* REX prefixes */
		case 0x40 ... 0x4f:
			continue;

			/* CHECKME: f0, f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}
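/*
 * Illustrative example (bytes chosen for this comment, not taken from the
 * original source): an instruction stream starting "66 9d" (operand-size
 * prefixed popf) makes the scan above return 1, because 0x66 is skipped
 * as a prefix and 0x9d matches the popf case, while a stream starting
 * with 0x9c (pushf) or any other non-prefix opcode returns 0 immediately.
 */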
static void set_singlestep(struct task_struct *child)
{
	struct pt_regs *regs = get_child_regs(child);

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc..  This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * If TF was already set, don't do anything else
	 */
	if (regs->eflags & TRAP_FLAG)
		return;

	/* Set TF on the kernel stack.. */
	regs->eflags |= TRAP_FLAG;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * AK: this is not enough, LAHF and IRET can change TF in user space too.
	 */
	if (is_at_popf(child, regs))
		return;

	child->ptrace |= PT_DTRACE;
}
static void clear_singlestep(struct task_struct *child)
{
	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (child->ptrace & PT_DTRACE) {
		struct pt_regs *regs = get_child_regs(child);
		regs->eflags &= ~TRAP_FLAG;
		child->ptrace &= ~PT_DTRACE;
	}
}
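/*
 * In short: set_singlestep() always sets TIF_SINGLESTEP, but records
 * PT_DTRACE only when it was the one to turn TF on (and the traced
 * instruction is not a popf that would change TF itself);
 * clear_singlestep() mirrors that and clears TF only when PT_DTRACE
 * says it was set here.
 */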
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	clear_singlestep(child);
}
static int putreg(struct task_struct *child,
	unsigned long regno, unsigned long value)
{
	unsigned long tmp;

	/* Some code in the 64bit emulation may not be 64bit clean.
	   Don't take any chances. */
	if (test_tsk_thread_flag(child, TIF_IA32))
		value &= 0xffffffff;
	switch (regno) {
		case offsetof(struct user_regs_struct,fs):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.fsindex = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,gs):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.gsindex = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,ds):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.ds = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,es):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.es = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,ss):
			if ((value & 3) != 3)
				return -EIO;
			value &= 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,fs_base):
			if (value >= TASK_SIZE)
				return -EIO;
			child->thread.fs = value;
			return 0;
		case offsetof(struct user_regs_struct,gs_base):
			if (value >= TASK_SIZE)
				return -EIO;
			child->thread.gs = value;
			return 0;
		case offsetof(struct user_regs_struct, eflags):
			value &= FLAG_MASK;
			tmp = get_stack_long(child, EFL_OFFSET);
			tmp &= ~FLAG_MASK;
			value |= tmp;
			break;
		case offsetof(struct user_regs_struct,cs):
			if ((value & 3) != 3)
				return -EIO;
			value &= 0xffff;
			break;
		case offsetof(struct user_regs_struct, rip):
			/* Check if the new RIP address is canonical */
			if (value >= TASK_SIZE)
				return -EIO;
			break;
	}
	put_stack_long(child, regno - sizeof(struct pt_regs), value);
	return 0;
}
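/*
 * Note on FLAG_MASK (0x44dd5), used in the eflags case above: it lets a
 * tracer modify the arithmetic/status flags plus TF, DF, OF, NT and AC,
 * while privileged bits such as IF, IOPL and VM are taken from the
 * child's existing eflags value instead.
 */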
static unsigned long getreg(struct task_struct *child, unsigned long regno)
{
	unsigned long val;
	switch (regno) {
		case offsetof(struct user_regs_struct, fs):
			return child->thread.fsindex;
		case offsetof(struct user_regs_struct, gs):
			return child->thread.gsindex;
		case offsetof(struct user_regs_struct, ds):
			return child->thread.ds;
		case offsetof(struct user_regs_struct, es):
			return child->thread.es;
		case offsetof(struct user_regs_struct, fs_base):
			return child->thread.fs;
		case offsetof(struct user_regs_struct, gs_base):
			return child->thread.gs;
		default:
			regno = regno - sizeof(struct pt_regs);
			val = get_stack_long(child, regno);
			if (test_tsk_thread_flag(child, TIF_IA32))
				val &= 0xffffffff;
			return val;
	}
}
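/*
 * Tracer-side usage sketch (user-space code, shown only as a comment and
 * not part of this file): a debugger reading the child's RIP would do
 *
 *	errno = 0;
 *	long rip = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user_regs_struct, rip), 0);
 *
 * Such a request reaches sys_ptrace() below as PTRACE_PEEKUSR with addr
 * holding the register offset, and is answered by getreg()/putreg().
 */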
asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data)
{
	struct task_struct *child;
	long i, ret;
	unsigned ui;

	/* This lock_kernel fixes a subtle race with suid exec */
	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;
	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp, (unsigned long __user *)data);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;

		switch (addr) {
		case 0 ... sizeof(struct user_regs_struct):
			tmp = getreg(child, addr);
			break;
		case offsetof(struct user, u_debugreg[0]):
			tmp = child->thread.debugreg0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			tmp = child->thread.debugreg1;
			break;
		case offsetof(struct user, u_debugreg[2]):
			tmp = child->thread.debugreg2;
			break;
		case offsetof(struct user, u_debugreg[3]):
			tmp = child->thread.debugreg3;
			break;
		case offsetof(struct user, u_debugreg[6]):
			tmp = child->thread.debugreg6;
			break;
		case offsetof(struct user, u_debugreg[7]):
			tmp = child->thread.debugreg7;
			break;
		default:
			tmp = 0;
			break;
		}
		ret = put_user(tmp, (unsigned long __user *)data);
		break;
	}
	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;

		switch (addr) {
		case 0 ... sizeof(struct user_regs_struct):
			ret = putreg(child, addr, data);
			break;
		/* Disallow setting a breakpoint in the vsyscall page. */
		case offsetof(struct user, u_debugreg[0]):
			if (data >= TASK_SIZE-7)
				break;
			child->thread.debugreg0 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			if (data >= TASK_SIZE-7)
				break;
			child->thread.debugreg1 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[2]):
			if (data >= TASK_SIZE-7)
				break;
			child->thread.debugreg2 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[3]):
			if (data >= TASK_SIZE-7)
				break;
			child->thread.debugreg3 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[6]):
			if (data >> 32)
				break;
			child->thread.debugreg6 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[7]):
			/* See arch/i386/kernel/ptrace.c for an explanation of
			 * this awkward check. */
			data &= ~DR_CONTROL_RESERVED;
			for (i = 0; i < 4; i++)
				if ((0x5454 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
					break;
			if (i == 4) {
				child->thread.debugreg7 = data;
				ret = 0;
			}
			break;
		}
		break;
	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT:    /* restart after signal. */

		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		ret = 0;
		break;
#ifdef CONFIG_IA32_EMULATION
		/* This only makes sense with 32bit programs. Allow a
		   64bit debugger to fully examine them too. Better
		   don't use it against 64bit processes, use
		   PTRACE_ARCH_PRCTL instead. */
	case PTRACE_SET_THREAD_AREA: {
		struct user_desc __user *p;
		int old;
		p = (struct user_desc __user *)data;
		get_user(old, &p->entry_number);
		put_user(addr, &p->entry_number);
		ret = do_set_thread_area(&child->thread, p);
		put_user(old, &p->entry_number);
		break;
	case PTRACE_GET_THREAD_AREA:
		p = (struct user_desc __user *)data;
		get_user(old, &p->entry_number);
		put_user(addr, &p->entry_number);
		ret = do_get_thread_area(&child->thread, p);
		put_user(old, &p->entry_number);
		break;
	}
#endif
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		break;

	case PTRACE_SINGLESTEP: /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		set_singlestep(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;
	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __put_user(getreg(child, ui), (unsigned long __user *)data);
			data += sizeof(long);
		}
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __get_user(tmp, (unsigned long __user *)data);
			putreg(child, ui, tmp);
			data += sizeof(long);
		}
		break;
	}

	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
		break;
	}
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
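/*
 * Typical tracer sequence against this entry point, as a rough sketch
 * (user-space calls shown as comments only):
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);       -> ptrace_attach()
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_GETREGS, pid, 0, &regs);  -> getreg() loop above
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);   -> set_singlestep()
 *	ptrace(PTRACE_DETACH, pid, 0, 0);       -> ptrace_detach()
 */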
static void syscall_trace(struct pt_regs *regs)
{

#if 0
	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
	       current->comm,
	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
	       current_thread_info()->flags, current->ptrace);
#endif

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
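/*
 * syscall_trace() is shared by the entry and exit hooks below: it stops
 * the task with SIGTRAP (or SIGTRAP | 0x80 when the tracer asked for
 * PTRACE_O_TRACESYSGOOD) so a tracer using PTRACE_SYSCALL can inspect
 * the registers before the system call runs and again before it returns.
 */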
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_rax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);

	if (unlikely(current->audit_context)) {
		if (test_thread_flag(TIF_IA32)) {
			audit_syscall_entry(current, AUDIT_ARCH_I386,
					    regs->orig_rax,
					    regs->rbx, regs->rcx,
					    regs->rdx, regs->rsi);
		} else {
			audit_syscall_entry(current, AUDIT_ARCH_X86_64,
					    regs->orig_rax,
					    regs->rdi, regs->rsi,
					    regs->rdx, regs->r10);
		}
	}
}
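/*
 * The two audit_syscall_entry() calls above differ only in which
 * registers carry the first four syscall arguments: ebx/ecx/edx/esi for
 * the ia32 entry path versus rdi/rsi/rdx/r10 for native 64-bit calls
 * (r10 rather than rcx, because the syscall instruction clobbers rcx
 * with the return address).
 */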
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(current, AUDITSC_RESULT(regs->rax), regs->rax);

	if ((test_thread_flag(TIF_SYSCALL_TRACE)
	     || test_thread_flag(TIF_SINGLESTEP))
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);
}