/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */
#include "linux/stddef.h"
#include "linux/err.h"
#include "linux/hardirq.h"
#include "linux/mm.h"
#include "linux/personality.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "linux/sched.h"
#include "linux/tick.h"
#include "linux/threads.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"
/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
static inline int external_pid(struct task_struct *task)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}
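/*
 * Map a host pid back to the index of the virtual CPU whose task is
 * running in it; returns -1 if no entry in cpu_tasks matches.
 */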
int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}
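/*
 * Allocate 2^order pages for a kernel stack.  The atomic flag selects
 * GFP_ATOMIC so that callers in atomic context do not sleep here.
 */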
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}
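/*
 * Kernel threads are created by stashing the function and its argument
 * in current's thread.request and forking; the child picks them up in
 * new_thread_handler() below.
 */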
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(task), task });
}
extern void arch_switch_to(struct task_struct *from, struct task_struct *to);
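/*
 * The actual context switch: hand off to the incoming task's saved
 * register state with switch_threads().  When control comes back here,
 * the loop re-switches as long as the resumed task has a saved_task
 * pending.
 */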
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current->thread.prev_sched, current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		next = current->thread.saved_task;
		prev = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}
void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}
void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}
extern void schedule_tail(struct task_struct *prev);
/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();
	if (current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to do this to
	 * improve performance. -bb
	 */
	arch_switch_to(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}
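/*
 * Set up the state of a new child.  A forked process gets a copy of the
 * parent's registers and resumes in fork_handler(); a kernel thread
 * inherits the proc/arg request and starts in new_thread_handler().
 */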
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}
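/*
 * The idle loop: with nothing to run, stop the periodic tick and sleep
 * until the next timer event is due.
 */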
void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_stop_sched_tick();
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_restart_sched_tick();
	}
}
void cpu_idle(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}
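/*
 * Walk the given task's page tables by hand (pgd -> pud -> pmd -> pte)
 * to translate a virtual address into a physical one, optionally handing
 * the pte back to the caller.
 */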
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	if (task->mm == NULL)
		return ERR_PTR(-EINVAL);
	pgd = pgd_offset(task->mm, addr);
	if (!pgd_present(*pgd))
		return ERR_PTR(-EINVAL);

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return ERR_PTR(-EINVAL);

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return ERR_PTR(-EINVAL);

	pte = pte_offset_kernel(pmd, addr);
	ptent = *pte;
	if (!pte_present(ptent))
		return ERR_PTR(-EINVAL);

	if (pte_out != NULL)
		*pte_out = ptent;
	return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return "(Unknown)";
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)" : __va((unsigned long) addr);
#endif
}
void dump_thread(struct pt_regs *regs, struct user *u)
{
}
int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}
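/*
 * Decide whether a stack pointer lies outside the current kernel stack:
 * mask sp down to its stack base and compare with current_thread, which
 * sits at the bottom of the kernel stack.
 */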
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread;
}
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
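/*
 * Run the UML-specific exitcalls, most recently registered first; the
 * __uml_exitcall section boundaries come from the linker script.
 */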
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
char *uml_strdup(char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread->cpu;
}
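/*
 * PTRACE_SYSEMU support.  The mode (0-2) is exported read/write through
 * /proc/sysemu and clamped to sysemu_supported, which is probed from the
 * host's ptrace elsewhere at boot.
 */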
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;
void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}
static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,
			    int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
		/* No overflow */
		*eof = 1;

	return strlen(buf);
}
static int proc_write_sysemu(struct file *file, const char __user *buf,
			     unsigned long count, void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}
int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, &proc_root);
	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	ent->read_proc = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);
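/*
 * Reports the singlestepping state of a task: 0 if it is not being
 * singlestepped (PT_DTRACE clear), 1 if a singlestepped syscall is in
 * progress, 2 otherwise.
 */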
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}
/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif