2 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
3 * Copyright 2012, Rene Gollent, rene@gollent.com.
4 * Distributed under the terms of the MIT License.
6 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7 * Copyright 2002, Michael Noisternig. All rights reserved.
8 * Distributed under the terms of the NewOS License.
14 #include <arch/x86/descriptors.h>
16 #include "asm_offsets.h"
17 #include "syscall_numbers.h"
22 /* void x86_fnsave(void *fpu_state); */
/* Saves the legacy x87 FPU state into *fpu_state.
   NOTE(review): the function body is not visible in this chunk —
   presumably a single fnsave plus ret; confirm against the full file. */
27 FUNCTION_END(x86_fnsave)
29 /* void x86_fxsave(void *fpu_state); */
/* Saves the extended FPU/SSE state into *fpu_state.
   NOTE(review): body not visible in this chunk — presumably fxsave
   into the buffer (which must be 16-byte aligned for fxsave); confirm. */
34 FUNCTION_END(x86_fxsave)
36 /* void x86_frstor(const void *fpu_state); */
/* Restores the legacy x87 FPU state from *fpu_state.
   NOTE(review): body not visible in this chunk — presumably frstor
   plus ret; confirm against the full file. */
41 FUNCTION_END(x86_frstor)
43 /* void x86_fxrstor(const void *fpu_state); */
44 FUNCTION(x86_fxrstor):
/* Restores the extended FPU/SSE state from *fpu_state.
   NOTE(review): body not visible in this chunk — presumably fxrstor
   (buffer must be 16-byte aligned) plus ret; confirm. */
48 FUNCTION_END(x86_fxrstor)
50 /* void x86_noop_swap(void *old_fpu_state, const void *new_fpu_state); */
51 FUNCTION(x86_noop_swap):
/* FPU-state swap stub that intentionally does nothing; used when no
   FPU state transfer is needed on context switch.
   NOTE(review): body not visible in this chunk — presumably just ret. */
54 FUNCTION_END(x86_noop_swap)
56 /* void x86_fnsave_swap(void *old_fpu_state, const void *new_fpu_state); */
57 FUNCTION(x86_fnsave_swap):
/* Saves the current legacy x87 state to old_fpu_state and restores the
   state in new_fpu_state (per the C signature above).
   NOTE(review): body not visible in this chunk — presumably
   fnsave/frstor on the two buffers; confirm against the full file. */
63 FUNCTION_END(x86_fnsave_swap)
65 /* void x86_fxsave_swap(void *old_fpu_state, const void *new_fpu_state); */
66 FUNCTION(x86_fxsave_swap):
/* Saves the current extended FPU/SSE state to old_fpu_state and restores
   from new_fpu_state (per the C signature above).
   NOTE(review): body not visible in this chunk — presumably
   fxsave/fxrstor on the two buffers; confirm. */
72 FUNCTION_END(x86_fxsave_swap)
74 /* uint32 x86_get_stack_frame(); */
75 FUNCTION(x86_get_stack_frame):
/* Returns the current stack frame address as a uint32 in %eax (cdecl).
   NOTE(review): body not visible in this chunk — presumably
   mov %ebp,%eax plus ret; confirm against the full file. */
78 FUNCTION_END(x86_get_stack_frame)
80 /* uint64 x86_read_msr(uint32 register); */
81 FUNCTION(x86_read_msr):
/* Reads a model-specific register; the uint64 result is returned in
   %edx:%eax per the i386 cdecl convention, which matches rdmsr's output.
   NOTE(review): body not visible in this chunk — presumably loads the
   MSR index into %ecx then executes rdmsr; confirm. */
85 FUNCTION_END(x86_read_msr)
87 /* void x86_write_msr(uint32 register, uint64 value); */
88 FUNCTION(x86_write_msr):
/* Writes a 64-bit value to a model-specific register.
   NOTE(review): body not visible in this chunk — presumably loads the
   MSR index into %ecx and the value into %edx:%eax, then wrmsr; confirm. */
94 FUNCTION_END(x86_write_msr)
96 /* void x86_context_switch(struct arch_thread* oldState,
97 	struct arch_thread* newState); */
98 FUNCTION(x86_context_switch):
99 	pusha					/* pushes 8 words onto the stack */
/* After pusha: 8*4 = 32 bytes of saved registers plus the 4-byte return
   address, so the first argument (oldState) is at 36(%esp) and the
   second (newState) at 40(%esp). */
100 	movl	36(%esp),%eax	/* save oldState->current_stack */
/* NOTE(review): the stores/loads of ->current_stack and the final popa/ret
   are elided in this chunk — confirm against the full file. */
105 	movl	40(%esp),%eax	/* get new newState->current_stack */
109 FUNCTION_END(x86_context_switch)
111 /* void x86_swap_pgdir(uint32 newPageDir); */
112 FUNCTION(x86_swap_pgdir):
/* Switches the active page directory to the given physical address.
   NOTE(review): body not visible in this chunk — presumably loads the
   argument into %cr3; confirm against the full file. */
116 FUNCTION_END(x86_swap_pgdir)
118 /* thread exit stub */
120 FUNCTION(x86_userspace_thread_exit):
/* Userspace stub executed when a thread's entry function returns: it
   invokes the exit_thread syscall so the thread terminates cleanly. */
125 	movl	$SYSCALL_EXIT_THREAD, %eax
128 FUNCTION_END(x86_userspace_thread_exit)
/* End-of-stub marker; NOTE(review): presumably used to compute the stub's
   size so it can be copied onto the userspace stack — confirm with callers. */
129 SYMBOL(x86_end_userspace_thread_exit):
136 FUNCTION(x86_reboot):
/* Forces a CPU reset.
   NOTE(review): body not visible in this chunk — typical implementations
   load a null IDT and trigger a triple fault; confirm against the full file. */
141 FUNCTION_END(x86_reboot)
144 /* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
145 FUNCTION(_arch_cpu_user_memcpy):
/* Copies `size` bytes between buffers that may touch unmapped userspace
   memory. Before touching the data, the address of .L_user_memcpy_error
   is installed in *faultHandler, so a page fault during the copy resumes
   at the error path below, which returns -1 (a generic error the C
   wrapper translates).
   NOTE(review): argument offsets 12/16/20/24(%esp) imply two registers
   (likely %esi/%edi) are pushed before this point — prologue not visible
   in this chunk; confirm. */
148 	movl	12(%esp),%edi	/* dest */
149 	movl	16(%esp),%esi	/* source */
150 	movl	20(%esp),%ecx	/* count */
152 	/* set the fault handler */
153 	movl	24(%esp),%edx	/* fault handler */
/* Install our local error label as the per-thread fault handler. */
155 	movl	$.L_user_memcpy_error, (%edx)
/* NOTE(review): the bulk-copy instructions are elided in this chunk. */
163 	/* move any remaining data by bytes */
169 	/* restore the old fault handler */
177 	/* error condition */
178 .L_user_memcpy_error:
179 	/* restore the old fault handler */
181 	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
185 FUNCTION_END(_arch_cpu_user_memcpy)
188 /* status_t arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
189 FUNCTION(_arch_cpu_user_memset):
/* Fills `count` bytes at a possibly-unmapped user address with byte `c`.
   Same fault-handler protocol as _arch_cpu_user_memcpy: install
   .L_user_memset_error in *faultHandler first, so a page fault during the
   fill lands on the error path and returns -1.
   NOTE(review): argument offsets 12/16/20/24(%esp) imply registers are
   pushed before this point — prologue not visible in this chunk; confirm. */
192 	movl	12(%esp),%edi	/* dest */
193 	movb	16(%esp),%al	/* c */
194 	movl	20(%esp),%ecx	/* count */
196 	/* set the fault handler */
197 	movl	24(%esp),%edx	/* fault handler */
/* Install our local error label as the per-thread fault handler. */
199 	movl	$.L_user_memset_error, (%edx)
/* NOTE(review): the fill loop itself is elided in this chunk. */
204 	/* restore the old fault handler */
212 	/* error condition */
213 .L_user_memset_error:
214 	/* restore the old fault handler */
216 	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
220 FUNCTION_END(_arch_cpu_user_memset)
223 /* ssize_t arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
224 FUNCTION(_arch_cpu_user_strlcpy):
/* strlcpy with fault protection: copies at most size-1 bytes, always
   NUL-terminates when size > 0, and (per strlcpy semantics, matching the
   "count remaining bytes in src" section below) returns the full length
   of the source string. A fault during any user access resumes at
   .L_user_strlcpy_error and returns -1.
   NOTE(review): argument offsets 16/20/24/28(%esp) imply three registers
   are pushed before this point — prologue not visible in this chunk;
   confirm against the full file. */
228 	movl	16(%esp),%edi	/* dest */
229 	movl	20(%esp),%esi	/* source */
230 	movl	24(%esp),%ecx	/* count */
232 	/* set the fault handler */
233 	movl	28(%esp),%edx	/* fault handler */
235 	movl	$.L_user_strlcpy_error, (%edx)
237 	/* Check for 0 length */
/* size == 0: nothing to copy, just measure the source string. */
239 	je	.L_user_strlcpy_source_count
241 	/* Copy at most count - 1 bytes */
244 	/* If count is now 0, skip straight to null terminating
245 	   as our loop will otherwise overflow */
246 	jnz	.L_user_strlcpy_copy_begin
248 	jmp	.L_user_strlcpy_source_count
250 .L_user_strlcpy_copy_begin:
252 .L_user_strlcpy_copy_loop:
253 	/* move data by bytes */
/* Stop early when the source NUL has been copied. */
257 	jz	.L_user_strlcpy_source_done
258 	loop	.L_user_strlcpy_copy_loop
260 	/* null terminate string */
264 	/* count remaining bytes in src */
265 .L_user_strlcpy_source_count:
/* Scan the rest of the source to compute the strlcpy return value. */
267 	# %ecx was 0 and is now max
274 .L_user_strlcpy_source_done:
278 	/* restore the old fault handler */
286 	/* error condition */
287 .L_user_strlcpy_error:
288 	/* restore the old fault handler */
290 	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
295 FUNCTION_END(_arch_cpu_user_strlcpy)
298 /*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
299 	jmp_buf jumpBuffer, void (*function)(void*), void* parameter)
301 	Called by debug_call_with_fault_handler() to do the dirty work of setting
302 	the fault handler and calling the function. If the function causes a page
303 	fault, the arch_debug_call_with_fault_handler() calls longjmp() with the
304 	given \a jumpBuffer. Otherwise it returns normally.
306 	debug_call_with_fault_handler() has already saved the CPU's fault_handler
307 	and fault_handler_stack_pointer and will reset them later, so
308 	arch_debug_call_with_fault_handler() doesn't need to care about it.
310 	\param cpu The \c cpu_ent for the current CPU.
311 	\param jumpBuffer Buffer to be used for longjmp().
312 	\param function The function to be called.
313 	\param parameter The parameter to be passed to the function to be called.
315 FUNCTION(arch_debug_call_with_fault_handler):
// NOTE(review): the prologue (frame setup for the 8/12/16/20(%ebp)
// accesses below) and the load of the fault-handler label into %edx are
// not visible in this chunk — confirm against the full file.
319 	// Set fault handler address, and fault handler stack pointer address. We
320 	// don't need to save the previous values, since that's done by the caller.
321 	movl	8(%ebp), %eax	// cpu to %eax
323 	movl	%edx, CPU_ENT_fault_handler(%eax)
324 	movl	%ebp, CPU_ENT_fault_handler_stack_pointer(%eax)
// Invoke function(parameter) using the caller-frame arguments.
327 	movl	20(%ebp), %eax	// parameter
329 	movl	16(%ebp), %eax	// function
337 	// fault -- return via longjmp(jumpBuffer, 1)
339 	movl	%ebp, %esp	// restore %esp
341 	movl	12(%ebp), %eax	// jumpBuffer
344 FUNCTION_END(arch_debug_call_with_fault_handler)