/*
 * linux/arch/x86-64/kernel/process.c
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
{
        atomic_inc(&hlt_counter);
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        atomic_dec(&hlt_counter);
}

EXPORT_SYMBOL(enable_hlt);
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (!atomic_read(&hlt_counter)) {
                local_irq_disable();
                if (!need_resched())
                        safe_halt();
                else
                        local_irq_enable();
        }
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        int oldval;

        local_irq_enable();

        /*
         * Deal with another CPU just having chosen a thread to
         * run here:
         */
        oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

        if (!oldval) {
                set_thread_flag(TIF_POLLING_NRFLAG);
                asm volatile(
                        "2:"
                        "testl %0,%1;"
                        "rep; nop;"
                        "je 2b;"
                        : :
                        "i" (_TIF_NEED_RESCHED),
                        "m" (current_thread_info()->flags));
                clear_thread_flag(TIF_POLLING_NRFLAG);
        } else {
                set_need_resched();
        }
}
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
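/*
 * cpu_idle_wait() is intended for code that changes pm_idle at run time:
 * it returns only after every online CPU has been observed passing
 * through the idle loop once more, so no CPU can still be executing the
 * old idle handler when the caller proceeds.
 */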
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        idle_task_exit();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        while (1)
                safe_halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        idle();
                }

                schedule();
        }
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
        local_irq_enable();

        if (!need_resched()) {
                set_thread_flag(TIF_POLLING_NRFLAG);
                do {
                        __monitor((void *)&current_thread_info()->flags, 0, 0);
                        if (need_resched())
                                break;
                        __mwait(0, 0);
                } while (!need_resched());
                clear_thread_flag(TIF_POLLING_NRFLAG);
        }
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        static int printed;

        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs supports mwait
                 */
                if (!pm_idle) {
                        if (!printed) {
                                printk("using mwait in idle threads.\n");
                                printed = 1;
                        }
                        pm_idle = mwait_idle;
                }
        }
}
static int __init idle_setup(char *str)
{
        if (!strncmp(str, "poll", 4)) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        }

        boot_option_idle_override = 1;
        return 1;
}

__setup("idle=", idle_setup);
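/*
 * Booting with "idle=poll" on the kernel command line installs poll_idle
 * as pm_idle before select_idle_routine() runs (which only picks
 * mwait_idle while pm_idle is still unset), and boot_option_idle_override
 * records that the user forced the choice so later idle-selection code
 * can leave it alone.  Example command line:
 *
 *	root=/dev/sda1 idle=poll
 */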
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk("\n");
        print_modules();
        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
        printk_address(regs->rip);
        printk("\nRSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->rax, regs->rbx, regs->rcx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->rdx, regs->rsi, regs->rdi);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->rbp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        asm("movq %%cr0, %0": "=r" (cr0));
        asm("movq %%cr2, %0": "=r" (cr2));
        asm("movq %%cr3, %0": "=r" (cr3));
        asm("movq %%cr4, %0": "=r" (cr4));

        printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
        printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
        printk("CPU %d:", smp_processor_id());
        __show_regs(regs);
        show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(me);

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}
void flush_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_info *t = current_thread_info();

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(tsk);

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}
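/*
 * About the _TIF_ABI_PENDING handling in flush_thread(): exec of a binary
 * with the other ABI marks the task with _TIF_ABI_PENDING, and the XOR
 * against (_TIF_ABI_PENDING | _TIF_IA32) clears that marker and toggles
 * _TIF_IA32 in one step, so the new image starts out with the correct
 * 32-bit or 64-bit ABI flag.
 */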
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                        dead_task->comm,
                                        dead_task->mm->context.ldt,
                                        dead_task->mm->context.size);
                        BUG();
                }
        }
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct n_desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        desc->a = LDT_entry_a(&ud);
        desc->b = LDT_entry_b(&ud);
}
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
                (((u32)desc->base1) << 16) |
                (((u32)desc->base2) << 24);
}
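/*
 * A GDT descriptor keeps its 32-bit base scattered across three fields:
 * base0 holds bits 0-15, base1 bits 16-23 and base2 bits 24-31.
 * read_32bit_tls() simply reassembles those pieces, which is why the
 * shifts above are 16 and 24 rather than 16 and 32.
 */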
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        int err;
        struct pt_regs * childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;

        *childregs = *regs;

        childregs->rax = 0;
        childregs->rsp = rsp;
        if (rsp == ~0UL) {
                childregs->rsp = (unsigned long)childregs;
        }

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp;

        set_ti_thread_flag(p->thread_info, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = ia32_child_tls(p, childregs);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}
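/*
 * Note on the CLONE_SETTLS path above: x86-64 syscall arguments arrive in
 * rdi, rsi, rdx, r10 and r8, so childregs->r8 is the fifth clone()
 * argument, i.e. the new thread's TLS base, which do_arch_prctl()
 * installs via ARCH_SET_FS.
 */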
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
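/*
 * The ## token paste means, for example, loaddebug(next, 7) expands to
 * set_debug(next->debugreg7, 7): the r argument selects both the saved
 * thread field and the hardware debug register to load it into.
 */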
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        unlazy_fpu(prev_p);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        asm volatile("mov %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_TLS(next, cpu);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                /* segment register != 0 always requires a reload.
                   also reload when it has changed.
                   when prev process used 64bit base always reload
                   to avoid an information leak. */
                if (unlikely(fsindex | next->fsindex | prev->fs)) {
                        loadsegment(fs, next->fsindex);
                        /* check if the user used a selector != 0
                         * if yes clear 64bit base, since overloaded base
                         * is always mapped to the Null selector
                         */
                        if (fsindex)
                                prev->fs = 0;
                }
                /* when next process has a 64bit base use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
                                prev->gs = 0;
                }
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /*
         * Switch the PDA context.
         */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg7)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        /*
         * Handle the IO bitmap
         */
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                if (next->io_bitmap_ptr)
                        /*
                         * Copy the relevant range of the IO bitmap.
                         * Normally this is 128 bytes or less:
                         */
                        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                                max(prev->io_bitmap_max, next->io_bitmap_max));
                else {
                        /*
                         * Clear any possible leftover bits:
                         */
                        memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
                }
        }

        return prev_p;
}
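/*
 * __switch_to() returning prev_p is what lets the switch_to() macro in
 * the entry code hand the previous task back to the scheduler as "last"
 * after the stack switch, once we are already running on next_p's stack.
 */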
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs regs)
{
        long error;
        char * filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
        return error;
}
void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit childs are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->rsp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
                    NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, rip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)p->thread_info;
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
        do {
                if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);
                if (!in_sched_functions(rip))
                        return rip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}
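/*
 * The walk above relies on kernel stack frames being chained through the
 * frame pointer (the saved frame pointer at *fp, the return address at
 * fp+8), so it only yields meaningful results on frame-pointer builds;
 * the 16-iteration cap merely bounds the search.
 */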
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("movl %0,%%fs" :: "r" (0));
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit) {
                        rdmsrl(MSR_FS_BASE, base);
                } else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
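/*
 * User space reaches do_arch_prctl() through the arch_prctl(2) syscall.
 * A minimal sketch of a caller, assuming glibc's syscall() wrapper and
 * the ARCH_* constants from <asm/prctl.h>:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);	(read current FS base)
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000);	(example GS base value)
 *
 * ARCH_GET_FS/ARCH_GET_GS treat addr as a pointer to store the base into;
 * ARCH_SET_FS/ARCH_SET_GS treat addr as the new base itself.
 */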
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *pp, ptregs;

        pp = (struct pt_regs *)(tsk->thread.rsp0);
        --pp;

        ptregs = *pp;
        ptregs.cs &= 0xffff;
        ptregs.ss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
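/*
 * arch_align_stack() is used when a new program's stack is set up: with
 * stack randomization enabled it shifts the starting stack pointer down
 * by up to 8k, then rounds down to a 16-byte boundary to preserve the
 * ABI-required stack alignment.
 */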