/* Function prototypes. */

/* FIXME this is a hack to avoid inclusion conflicts */
#ifdef __kernel__

#include <minix/safecopies.h>
#include <machine/archtypes.h>
#include <machine/signal.h>
#include <machine/frame.h>

/* Struct declarations. */
struct proc;
clock_t get_realtime(void);
void set_realtime(clock_t);
void set_adjtime_delta(int32_t);
clock_t get_monotonic(void);
void set_kernel_timer(minix_timer_t *tp, clock_t t, tmr_func_t f);
void reset_kernel_timer(minix_timer_t *tp);
void ser_dump_proc(void);

void cycles_accounting_init(void);
/*
 * These functions start and stop accounting for process, kernel or idle
 * cycles.  They inherently have to account for some kernel cycles for the
 * process too, therefore they should be called as soon as possible after
 * trapping to the kernel and as late as possible before returning to
 * userspace.  These functions are architecture dependent.
 */
void context_stop(struct proc * p);
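/*
 * Illustrative sketch only, not the actual MINIX entry path: the comment
 * above means that cycle accounting for the interrupted process should stop
 * as soon as possible after entering the kernel and resume as late as
 * possible on the way back to userspace.  The trap handler name below is
 * hypothetical.
 *
 *	void hypothetical_trap_handler(struct proc *caller)
 *	{
 *		context_stop(caller);	// stop billing cycles to 'caller'
 *		// ... perform the actual kernel work ...
 *		switch_to_user();	// accounting resumes on the exit path
 *	}
 */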
/* this is a wrapper to make calling it from assembly easier */
void context_stop_idle(void);
int restore_fpu(struct proc *);
void save_fpu(struct proc *);
void save_local_fpu(struct proc *, int retain);
void fpu_sigcontext(struct proc *, struct sigframe_sigcontext *fr, struct
	sigcontext *sc);
#define kmain __k_unpaged_kmain
void kmain(kinfo_t *cbi);
void prepare_shutdown(int how);
__dead void minix_shutdown(minix_timer_t *tp);
void bsp_finish_booting(void);
int do_ipc(reg_t r1, reg_t r2, reg_t r3);
int cancel_async(struct proc *src, struct proc *dst);
int has_pending_notify(struct proc * caller, int src_p);
int has_pending_asend(struct proc * caller, int src_p);
void unset_notify_pending(struct proc * caller, int src_p);
int mini_notify(const struct proc *src, endpoint_t dst);
void enqueue(struct proc *rp);
void dequeue(struct proc *rp);
void switch_to_user(void);
void arch_proc_reset(struct proc *rp);
void arch_proc_setcontext(struct proc *rp, struct stackframe_s *state,
	int user, int restorestyle);
struct proc * arch_finish_switch_to_user(void);
struct proc *endpoint_lookup(endpoint_t ep);
#if DEBUG_ENABLE_IPC_WARNINGS
int isokendpt_f(const char *file, int line, endpoint_t e, int *p, int
	fatalflag);
#define isokendpt_d(e, p, f) isokendpt_f(__FILE__, __LINE__, (e), (p), (f))
#else
int isokendpt_f(endpoint_t e, int *p, int f);
#define isokendpt_d(e, p, f) isokendpt_f((e), (p), (f))
#endif
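/*
 * Usage sketch, illustrative only (variable names are hypothetical):
 * isokendpt_d() resolves endpoint 'ep' to a process table slot number and
 * returns non-OK when the endpoint does not name a live process; in debug
 * builds the wrapper also passes the call site to isokendpt_f().
 *
 *	int proc_nr;
 *	if (isokendpt_d(ep, &proc_nr, 0) != OK)
 *		return EDEADSRCDST;	// stale or invalid endpoint
 */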
void proc_no_time(struct proc *p);
void reset_proc_accounting(struct proc *p);
void increase_proc_signals(struct proc *p);
void flag_account(struct proc *p, int flag);
int try_deliver_senda(struct proc *caller_ptr, asynmsg_t *table, size_t
	size);
char *env_get(const char *key);

int get_priv(register struct proc *rc, int proc_type);
void set_sendto_bit(const struct proc *rc, int id);
void unset_sendto_bit(const struct proc *rc, int id);
void fill_sendto_mask(const struct proc *rc, sys_map_t *map);
int send_sig(endpoint_t proc_nr, int sig_nr);
void cause_sig(proc_nr_t proc_nr, int sig_nr);
void sig_delay_done(struct proc *rp);
void send_diag_sig(void);
void kernel_call(message *m_user, struct proc * caller);
void system_init(void);
void clear_endpoint(struct proc *rc);
void clear_ipc_refs(struct proc *rc, int caller_ret);
void kernel_call_resume(struct proc *p);
int sched_proc(struct proc *rp, int priority, int quantum, int cpu);
/* system/do_vtimer.c */
void vtimer_check(struct proc *rp);

void put_irq_handler(irq_hook_t *hook, int irq, irq_handler_t handler);
void rm_irq_handler(const irq_hook_t *hook);
void enable_irq(const irq_hook_t *hook);
int disable_irq(const irq_hook_t *hook);

void interrupts_enable(void);
void interrupts_disable(void);
int runqueues_ok(void);
#ifndef CONFIG_SMP
#define runqueues_ok_local runqueues_ok
#else
#define runqueues_ok_local() runqueues_ok_cpu(cpuid)
int runqueues_ok_cpu(unsigned cpu);
#endif
char *rtsflagstr(u32_t flags);
char *miscflagstr(u32_t flags);
char *schedulerstr(struct proc *scheduler);
/* prints process information */
void print_proc(struct proc *pp);
/* prints the given process and recursively all processes it depends on */
void print_proc_recursive(struct proc *pp);

void hook_ipc_msgrecv(message *msg, struct proc *src, struct proc *dst);
void hook_ipc_msgsend(message *msg, struct proc *src, struct proc *dst);
void hook_ipc_msgkcall(message *msg, struct proc *proc);
void hook_ipc_msgkresult(message *msg, struct proc *proc);
void hook_ipc_clear(struct proc *proc);
/* system/do_safecopy.c */
int verify_grant(endpoint_t, endpoint_t, cp_grant_id_t, vir_bytes, int,
	vir_bytes, vir_bytes *, endpoint_t *, u32_t *);

/* system/do_diagctl.c */
int do_diagctl(struct proc * caller, message *m);

void init_profile_clock(u32_t);
void stop_profile_clock(void);
/* functions defined in architecture-dependent files. */
void arch_post_init();
void arch_set_secondary_ipc_return(struct proc *, u32_t val);
phys_bytes phys_copy(phys_bytes source, phys_bytes dest, phys_bytes
	count);
void phys_copy_fault(void);
void phys_copy_fault_in_kernel(void);
void memset_fault(void);
void memset_fault_in_kernel(void);
#define virtual_copy(src, dst, bytes) \
				virtual_copy_f(NULL, src, dst, bytes, 0)
#define virtual_copy_vmcheck(caller, src, dst, bytes) \
				virtual_copy_f(caller, src, dst, bytes, 1)
int virtual_copy_f(struct proc * caller, struct vir_addr *src, struct
	vir_addr *dst, vir_bytes bytes, int vmcheck);
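/*
 * Usage sketch, illustrative only (the endpoints, addresses and size below
 * are hypothetical): callers are expected to use the virtual_copy() and
 * virtual_copy_vmcheck() macros, which supply the caller and vmcheck
 * arguments of virtual_copy_f() as shown above.
 *
 *	struct vir_addr src, dst;
 *
 *	src.proc_nr_e = src_ep;  src.offset = src_vir;
 *	dst.proc_nr_e = dst_ep;  dst.offset = dst_vir;
 *	if (virtual_copy(&src, &dst, bytes) != OK)
 *		return EFAULT;	// copy could not be completed
 */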
int data_copy(endpoint_t from, vir_bytes from_addr, endpoint_t to,
	vir_bytes to_addr, size_t bytes);
int data_copy_vmcheck(struct proc *, endpoint_t from, vir_bytes
	from_addr, endpoint_t to, vir_bytes to_addr, size_t bytes);
phys_bytes umap_virtual(struct proc * rp, int seg, vir_bytes vir_addr,
	vir_bytes bytes);
phys_bytes seg2phys(u16_t);
int vm_memset(struct proc *caller, endpoint_t who, phys_bytes dst,
	int pattern, phys_bytes count);

void arch_init(void);
void arch_boot_proc(struct boot_image *b, struct proc *p);
void cpu_identify(void);
/* arch dependent FPU initialization per CPU */
void fpu_init(void);
/* returns true if the FPU is present and initialized */
int is_fpu(void);
__dead void arch_shutdown(int);
void restore_user_context(struct proc * p);
void read_tsc(u32_t *high, u32_t *low);
int arch_init_profile_clock(u32_t freq);
void arch_stop_profile_clock(void);
void arch_ack_profile_clock(void);
void do_ser_debug(void);
int arch_get_params(char *parm, int max);
void memory_init(void);
void mem_clear_mapcache(void);
void arch_proc_init(struct proc *pr, u32_t, u32_t, u32_t, char *);
int arch_do_vmctl(message *m_ptr, struct proc *p);
int vm_contiguous(const struct proc *targetproc, vir_bytes vir_buf,
	size_t bytes);
void proc_stacktrace(struct proc *proc);
int vm_lookup(const struct proc *proc, vir_bytes virtual, phys_bytes
	*result, u32_t *ptent);
size_t vm_lookup_range(const struct proc *proc,
	vir_bytes vir_addr, phys_bytes *phys_addr, size_t bytes);
void delivermsg(struct proc *target);
void arch_do_syscall(struct proc *proc);
int arch_phys_map(int index, phys_bytes *addr, phys_bytes *len, int
	*flags);
int arch_phys_map_reply(int index, vir_bytes addr);
reg_t arch_get_sp(struct proc *p);
int arch_enable_paging(struct proc * caller);
int vm_check_range(struct proc *caller,
	struct proc *target, vir_bytes vir_addr, size_t bytes, int writable);

int copy_msg_from_user(message * user_mbuf, message * dst);
int copy_msg_to_user(message * src, message * user_mbuf);
void switch_address_space(struct proc * p);
void release_address_space(struct proc *pr);

void enable_fpu_exception(void);
void disable_fpu_exception(void);
void release_fpu(struct proc * p);
void arch_pause(void);
short cpu_load(void);
void busy_delay_ms(int ms);

void cpu_print_freq(unsigned cpu);
#endif /* __kernel__ */