/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/utsname.h"
#include "linux/utime.h"
#include "linux/smp_lock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
#include "linux/vmalloc.h"
#include "linux/spinlock.h"
#include "linux/err.h"
#include "asm/unistd.h"
#include "asm/segment.h"
#include "asm/pgtable.h"
#include "asm/processor.h"
#include "asm/tlbflush.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "kern_util.h"
#include "signal_kern.h"
#include "signal_user.h"
#include "time_user.h"
#include "frame_kern.h"
#include "sigcontext.h"
#include "2_5compat.h"
#include "mode_kern.h"
#include "choose-mode.h"
/* This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
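
/* Look up the task_struct for a pid under tasklist_lock.  If "require" is
 * set, failure to find the task is fatal rather than returning NULL.
 */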
struct task_struct *get_task(int pid, int require)
{
	struct task_struct *ret;

	read_lock(&tasklist_lock);
	ret = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);

	if(require && (ret == NULL)) panic("get_task couldn't find a task\n");
	return(ret);
}
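
/* Return the pid of the host process backing a task, in either tt or skas
 * mode.  With t == NULL the current task is used.
 */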
int external_pid(void *t)
{
	struct task_struct *task = t ? t : current;

	return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
}
int pid_to_processor_id(int pid)
{
	int i;

	for(i = 0; i < ncpus; i++){
		if(cpu_tasks[i].pid == pid) return(i);
	}
	return(-1);
}
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}
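
/* Allocate 2^order pages for a kernel stack and apply the usual UML stack
 * protections to the result.  GFP_ATOMIC is added for callers that cannot
 * sleep; a return of 0 means the allocation failed.
 */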
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	int flags = GFP_KERNEL;

	if(atomic) flags |= GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if(page == 0)
		return(0);
	stack_protections(page);
	return(page);
}
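
/* Kernel threads are forked off the current process.  The function and its
 * argument are stashed in current->thread.request so that the mode-specific
 * fork path can invoke them in the child; CLONE_VM keeps the new thread in
 * the kernel address space.
 */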
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0, NULL, 0, NULL,
		      NULL);
	if(pid < 0)
		panic("do_fork failed in kernel_thread, errno = %d", pid);
	return(pid);
}
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if(prev != next)
		cpu_clear(cpu, prev->cpu_vm_mask);
	cpu_set(cpu, next->cpu_vm_mask);
}
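
/* Record the task that is about to run on this CPU, together with its host
 * pid, in the per-cpu cpu_tasks array.
 */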
void set_current(void *t)
{
	struct task_struct *task = t;

	cpu_tasks[task->thread_info->cpu] = ((struct cpu_task)
		{ external_pid(task), task });
}
void *_switch_to(void *prev, void *next, void *last)
{
	return(CHOOSE_MODE(switch_to_tt(prev, next),
			   switch_to_skas(prev, next)));
}
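
/* Called on the way back out from an interrupt: give the scheduler and
 * pending signal delivery a chance to run before returning to the process.
 */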
void interrupt_end(void)
{
	if(need_resched()) schedule();
	if(test_tsk_thread_flag(current, TIF_SIGPENDING)) do_signal(0);
}
void release_thread(struct task_struct *task)
{
	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
}
void exit_thread(void)
{
	CHOOSE_MODE(exit_thread_tt(), exit_thread_skas());
	unprotect_stack((unsigned long) current_thread);
}
void *get_current(void)
{
	return(current);
}

void prepare_to_copy(struct task_struct *tsk)
{
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	p->thread = (struct thread_struct) INIT_THREAD;
	return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
				clone_flags, sp, stack_top, p, regs));
}
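
/* Run a callback in the context of the initial thread.  kmalloc_ok is saved,
 * cleared for the duration of the call, and restored afterwards.
 */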
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
			 arg);
	kmalloc_ok = save_kmalloc_ok;
}
unsigned long stack_sp(unsigned long page)
{
	return(page + PAGE_SIZE - sizeof(void *));
}
int current_pid(void)
{
	return(current->pid);
}
void default_idle(void)
{
	atomic_inc(&init_mm.mm_count);
	current->mm = &init_mm;
	current->active_mm = &init_mm;

	while(1){
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		irq_stat[smp_processor_id()].idle_timestamp = jiffies;
		if(need_resched())
			schedule();

		idle_sleep(10);
	}
}

void cpu_idle(void)
{
	CHOOSE_MODE(init_idle_tt(), init_idle_skas());
}
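
/* Walk a task's page tables by hand to translate a virtual address into a
 * physical address.  Returns ERR_PTR(-EINVAL) if the address is not mapped;
 * if pte_out is non-NULL the pte that was found is copied back through it.
 */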
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if(task->mm == NULL)
		return(ERR_PTR(-EINVAL));
	pgd = pgd_offset(task->mm, addr);
	pmd = pmd_offset(pgd, addr);
	if(!pmd_present(*pmd))
		return(ERR_PTR(-EINVAL));
	pte = pte_offset_kernel(pmd, addr);
	if(!pte_present(*pte))
		return(ERR_PTR(-EINVAL));
	if(pte_out != NULL)
		*pte_out = *pte;
	return((void *) (pte_val(*pte) & PAGE_MASK) + (addr & ~PAGE_MASK));
}
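
/* Fish the command name out of the process's argument area by translating
 * mm->arg_start to a kernel virtual address.  With SMP or HIGHMEM configured
 * this simply reports "(Unknown)".
 */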
char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return("(Unknown)");
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
#endif
}
void force_sigbus(void)
{
	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
	       current->pid);
	lock_kernel();
	sigaddset(&current->pending.signal, SIGBUS);
	recalc_sigpending();
	current->flags |= PF_SIGNALED;
	do_exit(SIGBUS | 0x80);
}
void dump_thread(struct pt_regs *regs, struct user *u)
{
}
void enable_hlt(void)
{
	panic("enable_hlt");
}

EXPORT_SYMBOL(enable_hlt);

void disable_hlt(void)
{
	panic("disable_hlt");
}

EXPORT_SYMBOL(disable_hlt);
extern int signal_frame_size;
void *um_kmalloc(int size)
{
	return(kmalloc(size, GFP_KERNEL));
}

void *um_kmalloc_atomic(int size)
{
	return(kmalloc(size, GFP_ATOMIC));
}

void *um_vmalloc(int size)
{
	return(vmalloc(size));
}
unsigned long get_fault_addr(void)
{
	return((unsigned long) current->thread.fault_addr);
}

EXPORT_SYMBOL(get_fault_addr);
void not_implemented(void)
{
	printk(KERN_DEBUG "Something isn't implemented in here\n");
}

EXPORT_SYMBOL(not_implemented);
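
/* Decide whether a stack pointer belongs to a process context rather than
 * the current kernel stack.  Kernel stacks are 2^CONFIG_KERNEL_STACK_ORDER
 * pages, so masking sp down to that boundary and comparing against
 * current_thread tells us whether sp lies on the current kernel stack.
 */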
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return(stack != (unsigned long) current_thread);
}
extern void remove_umid_dir(void);

__uml_exitcall(remove_umid_dir);
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
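
/* UML exitcalls are collected by the linker into a section bracketed by
 * __uml_exitcall_begin and __uml_exitcall_end; run them in reverse order of
 * registration on shutdown.
 */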
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
char *uml_strdup(char *string)
{
	char *new;

	new = kmalloc(strlen(string) + 1, GFP_KERNEL);
	if(new == NULL) return(NULL);
	strcpy(new, string);
	return(new);
}
void *get_init_task(void)
{
	return(&init_thread_union.thread_info.task);
}
int copy_to_user_proc(void *to, void *from, int size)
{
	return(copy_to_user(to, from, size));
}

int copy_from_user_proc(void *to, void *from, int size)
{
	return(copy_from_user(to, from, size));
}

int clear_user_proc(void *buf, int size)
{
	return(clear_user(buf, size));
}

int strlen_user_proc(char *str)
{
	return(strlen_user(str));
}
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if(cpu != 0)
		return(1);
#endif
	return(0);
}
int um_in_interrupt(void)
{
	return(in_interrupt());
}
int cpu(void)
{
	return(current_thread->cpu);
}
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */