/* This file is part of the lowest layer of the MINIX kernel. (The other part
 * is "proc.c".) The lowest layer does process switching and message handling.
 * Furthermore it contains the assembler startup code for Minix and the 32-bit
 * interrupt handlers. It cooperates with the code in "start.c" to set up a
 * good environment for main().
 *
 * The kernel is entered because of kernel calls, IPC calls, interrupts or
 * exceptions. The TSS is set up so that the kernel stack is loaded on entry.
 * The user context is saved to the proc table and the handler for the event
 * is called. Once the handler is done, switch_to_user() is called to pick a
 * new process, finish whatever needs to be done for that process to run, set
 * its context and switch to userspace.
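 *
 * As a rough sketch (approximate shapes only, not the authoritative
 * prototypes), the common path driven from this file is:
 *
 *        save the user context into the proc table   (SAVE_PROCESS_CTX)
 *        context_stop(...)     stop charging CPU time to the interrupted process
 *        do_ipc(...) / kernel_call(...) / irq_handle(...) / exception_handler(...)
 *        switch_to_user()      pick the next process and return to userspace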
 *
 * For communication with the boot monitor at startup time some constant
 * data are compiled into the beginning of the text segment. This facilitates
 * reading the data at the start of the boot process, since only the first
 * sector of the file needs to be read.
 *
 * Some data storage is also allocated at the end of this file. This data
 * will be at the start of the data segment of the kernel and will be read
 * and modified by the boot monitor before the kernel starts.
 */
#include "kernel/kernel.h" /* configures the kernel */

#include <machine/vm.h>
#include "../../kernel.h"
#include <minix/config.h>
#include <minix/const.h>
#include <minix/com.h>
#include <machine/asm.h>
#include <machine/interrupt.h>
#include "archconst.h"
#include "kernel/const.h"
#include "kernel/proc.h"

#include <machine/multiboot.h>

#include "arch_proto.h" /* K_STACK_SIZE */

#include "kernel/smp.h"

/* Selected 386 tss offsets. */
IMPORT(copr_not_available_handler)
IMPORT(switch_to_user)
IMPORT(multiboot_init)

/*===========================================================================*/
/*                            interrupt handlers                             */
/*              interrupt handlers for 386 32-bit protected mode             */
/*===========================================================================*/
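
/*
 * Note: irq_handle() is the C-level IRQ dispatcher; as used by
 * PIC_IRQ_HANDLER() below it is expected to run whatever handlers are
 * hooked on the given IRQ line before the interrupted context is resumed.
 */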
#define PIC_IRQ_HANDLER(irq) \
        call    _C_LABEL(irq_handle)    /* irq_handle(irq) */   ;\
/*===========================================================================*/
/*                              hwint00 - 07                                 */
/*===========================================================================*/
/* Note that this is a macro; it just looks like a subroutine. */
#define hwint_master(irq) \
        TEST_INT_IN_KERNEL(4, 0f)       ;\
        SAVE_PROCESS_CTX(0)     ;\
        movl    $0, %ebp        /* for stack trace */   ;\
        call    _C_LABEL(context_stop)  ;\
        PIC_IRQ_HANDLER(irq)    ;\
        movb    $END_OF_INT, %al        ;\
        outb    $INT_CTL        /* reenable interrupts in master pic */ ;\
        jmp     _C_LABEL(switch_to_user)        ;\
        call    _C_LABEL(context_stop_idle)     ;\
        PIC_IRQ_HANDLER(irq)    ;\
        movb    $END_OF_INT, %al        ;\
        outb    $INT_CTL        /* reenable interrupts in master pic */ ;\
        CLEAR_IF(10*4(%esp))    ;\
/* Each of these entry points is an expansion of the hwint_master macro */
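/*
 * For example, the first such entry point is roughly (a sketch; the full
 * source lists one ENTRY per IRQ):
 *
 *      ENTRY(hwint00)
 *      hwint_master(0)
 */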
/* Interrupt routine for irq 0 (the clock). */
/* Interrupt routine for irq 1 (keyboard) */
/* Interrupt routine for irq 2 (cascade!) */
/* Interrupt routine for irq 3 (second serial) */
/* Interrupt routine for irq 4 (first serial) */
/* Interrupt routine for irq 5 (XT winchester) */
/* Interrupt routine for irq 6 (floppy) */
/* Interrupt routine for irq 7 (printer) */
/*===========================================================================*/
/*                              hwint08 - 15                                 */
/*===========================================================================*/
/* Note that this is a macro; it just looks like a subroutine. */
#define hwint_slave(irq) \
        TEST_INT_IN_KERNEL(4, 0f)       ;\
        SAVE_PROCESS_CTX(0)     ;\
        movl    $0, %ebp        /* for stack trace */   ;\
        call    _C_LABEL(context_stop)  ;\
        PIC_IRQ_HANDLER(irq)    ;\
        movb    $END_OF_INT, %al        ;\
        outb    $INT_CTL        /* reenable interrupts in master pic */ ;\
        outb    $INT2_CTL       /* reenable slave 8259 */       ;\
        jmp     _C_LABEL(switch_to_user)        ;\
        call    _C_LABEL(context_stop_idle)     ;\
        PIC_IRQ_HANDLER(irq)    ;\
        movb    $END_OF_INT, %al        ;\
        outb    $INT_CTL        /* reenable interrupts in master pic */ ;\
        outb    $INT2_CTL       /* reenable slave 8259 */       ;\
        CLEAR_IF(10*4(%esp))    ;\
/* Each of these entry points is an expansion of the hwint_slave macro */
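/* (e.g. ENTRY(hwint08) expands hwint_slave(8), analogous to the sketch above) */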
/* Interrupt routine for irq 8 (realtime clock) */
/* Interrupt routine for irq 9 (irq 2 redirected) */
/* Interrupt routine for irq 10 */
/* Interrupt routine for irq 11 */
/* Interrupt routine for irq 12 */
/* Interrupt routine for irq 13 (FPU exception) */
/* Interrupt routine for irq 14 (AT winchester) */
/* Interrupt routine for irq 15 */
/*
 * IPC is only from a process to the kernel
 */
        /* save the pointer to the current process */
        /*
         * pass the syscall arguments from userspace to the handler.
         * SAVE_PROCESS_CTX() does not clobber these registers; they are
         * still set as userspace set them.
         */
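
        /*
         * do_ipc() is assumed to take these values and return the IPC status
         * in %eax; that status is written back into the caller's saved
         * registers below so userspace sees it as the call's result.
         */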
        /* stop user process cycles */
        /* for stack trace */
        call    _C_LABEL(context_stop)

        call    _C_LABEL(do_ipc)

        /* restore the current process pointer and save the return value */
        mov     %eax, AXREG(%esi)

        jmp     _C_LABEL(switch_to_user)
/*
 * a kernel call is only from a process to the kernel
 */
ENTRY(kernel_call_entry)
        /* save the pointer to the current process */
        /*
         * pass the syscall arguments from userspace to the handler.
         * SAVE_PROCESS_CTX() does not clobber these registers; they are
         * still set as userspace set them.
         */
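
        /*
         * kernel_call() is expected to copy the request message in from
         * userspace and to deliver the reply back the same way once the
         * call has been handled.
         */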
        /* stop user process cycles */
        /* for stack trace */
        call    _C_LABEL(context_stop)

        call    _C_LABEL(kernel_call)

        /* restore the current process pointer and save the return value */
        jmp     _C_LABEL(switch_to_user)
/*
 * called by the exception interrupt vectors. If the exception does not push
 * an error code, we assume that the vector handler pushed 0 instead. The next
 * thing pushed is the vector number. From this point on we can continue as if
 * every exception pushed an error code.
 */
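
/*
 * A sketch of the stack as exception_entry sees it (most recent push first),
 * assuming the exception came from user mode:
 *
 *      vector number           pushed by the vector handler
 *      error code (or 0)       pushed by the CPU or by the vector handler
 *      %eip, %cs, %eflags      pushed by the CPU
 *      %esp, %ss               pushed by the CPU on the privilege change
 */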
        /*
         * check if it is a nested trap by comparing the saved code segment
         * descriptor with the kernel CS first
         */
        TEST_INT_IN_KERNEL(12, exception_entry_nested)
exception_entry_from_user:
        /* stop user process cycles */
        /* clear %ebp for stack trace */
        call    _C_LABEL(context_stop)
        /*
         * push a pointer to the interrupt state pushed by the cpu and the
         * vector number pushed by the vector handler just before calling
         * exception_entry, and call the exception handler.
         */
        push    $0      /* it's not a nested exception */
        call    _C_LABEL(exception_handler)

        jmp     _C_LABEL(switch_to_user)
exception_entry_nested:
        pushl   $1      /* it's a nested exception */
        call    _C_LABEL(exception_handler)

        /* clear the error code and the exception number */
        /* resume execution at the point of exception */
/*===========================================================================*/
/*                          restore_user_context                             */
/*===========================================================================*/
ENTRY(restore_user_context)
        mov     4(%esp), %ebp   /* will assume P_STACKBASE == 0 */

        /* reconstruct the stack for iret */
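        /*
         * iret to user mode pops, in order: %eip, %cs, %eflags, %esp and %ss,
         * so those five values are pushed below in reverse order.
         */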
        push    $USER_DS_SELECTOR       /* ss */
        movl    SPREG(%ebp), %eax

        movl    PSWREG(%ebp), %eax

        push    $USER_CS_SELECTOR       /* cs */
        movl    PCREG(%ebp), %eax

        /* Restore segments as the user should see them. */
        movw    $USER_DS_SELECTOR, %si

        /* Same for general-purpose registers. */
        RESTORE_GP_REGS(%ebp)

        movl    BPREG(%ebp), %ebp

        iret    /* continue process */
/*===========================================================================*/
/*                            exception handlers                             */
/*===========================================================================*/

#define EXCEPTION_ERR_CODE(vector) \

#define EXCEPTION_NO_ERR_CODE(vector) \
        EXCEPTION_ERR_CODE(vector)
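
/*
 * The NO_ERR_CODE variant stands for exceptions where the CPU pushes no
 * error code; per the comment above, a dummy 0 is pushed in its place so
 * both variants reach exception_entry with the same frame.
 */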
        EXCEPTION_NO_ERR_CODE(DIVIDE_VECTOR)

LABEL(single_step_exception)
        EXCEPTION_NO_ERR_CODE(DEBUG_VECTOR)

        EXCEPTION_NO_ERR_CODE(NMI_VECTOR)
        /*
         * We have to be very careful as this interrupt can occur anytime. On
         * the other hand, if it interrupts a user process, we will resume the
         * same process, which makes things a little simpler. We know that we
         * are already on the kernel stack whenever it happens, and we can be
         * conservative and save everything as we don't need to be extremely
         * efficient as the interrupt is infrequent and some overhead is
         * already expected.
         */
        /*
         * save the important registers. We don't save %cs and %ss as they are
         * saved and restored by the CPU.
         */
        /*
         * We cannot be sure about the state of the kernel segment registers,
         * however, we always set %ds and %es to the same value as %ss.
         */
        call    _C_LABEL(nmi_watchdog_handler)

        /* restore all the important registers as they were before the trap */
LABEL(breakpoint_exception)
        EXCEPTION_NO_ERR_CODE(BREAKPOINT_VECTOR)

        EXCEPTION_NO_ERR_CODE(OVERFLOW_VECTOR)

        EXCEPTION_NO_ERR_CODE(BOUNDS_VECTOR)

        EXCEPTION_NO_ERR_CODE(INVAL_OP_VECTOR)

LABEL(copr_not_available)
        TEST_INT_IN_KERNEL(4, copr_not_available_in_kernel)
        cld     /* set direction flag to a known value */

        /* stop user process cycles */

        call    _C_LABEL(context_stop)
        call    _C_LABEL(copr_not_available_handler)
        /* reached upon failure only */
        jmp     _C_LABEL(switch_to_user)

copr_not_available_in_kernel:
        pushl   $COPROC_NOT_VECTOR
        jmp     exception_entry_nested
        EXCEPTION_ERR_CODE(DOUBLE_FAULT_VECTOR)

LABEL(copr_seg_overrun)
        EXCEPTION_NO_ERR_CODE(COPROC_SEG_VECTOR)

        EXCEPTION_ERR_CODE(INVAL_TSS_VECTOR)

LABEL(segment_not_present)
        EXCEPTION_ERR_CODE(SEG_NOT_VECTOR)

LABEL(stack_exception)
        EXCEPTION_ERR_CODE(STACK_FAULT_VECTOR)

LABEL(general_protection)
        EXCEPTION_ERR_CODE(PROTECTION_VECTOR)

        EXCEPTION_ERR_CODE(PAGE_FAULT_VECTOR)

        EXCEPTION_NO_ERR_CODE(COPROC_ERR_VECTOR)

LABEL(alignment_check)
        EXCEPTION_NO_ERR_CODE(ALIGNMENT_CHECK_VECTOR)

        EXCEPTION_NO_ERR_CODE(MACHINE_CHECK_VECTOR)

LABEL(simd_exception)
        EXCEPTION_NO_ERR_CODE(SIMD_EXCEPTION_VECTOR)
/*===========================================================================*/
/*                               reload_cr3                                  */
/*===========================================================================*/
/* PUBLIC void reload_cr3(void); */
        /*
         * we are in protected mode now, %cs is correct and we need to set the
         * data descriptors before we can touch anything
         *
         * first load the regular, highly mapped idt, gdt
         */
        /*
         * use the boot stack for now. The running CPUs are already using their
         * own stacks; the rest are still waiting to be booted.
         */
        movw    $KERN_DS_SELECTOR, %ax

        mov     $_C_LABEL(k_boot_stktop) - 4, %esp

        /* load the highly mapped idt, gdt, per-cpu tss */
        call    _C_LABEL(prot_load_selectors)

        jmp     _C_LABEL(smp_ap_boot)
/*===========================================================================*/
/*                                  data                                     */
/*===========================================================================*/

.short  0x526F  /* this must be the first data entry (magic #) */
LABEL(__k_unpaged_k_initial_stktop)

.space  K_STACK_SIZE    /* kernel stack */ /* FIXME use macro here */
LABEL(k_boot_stktop)    /* top of kernel stack */

LABEL(k_stacks_start)
        /* two pages for each stack, one for data, the other as a sandbox */
        .space  2 * (K_STACK_SIZE * CONFIG_MAX_CPUS)

/* top of kernel stack */