/* system dependent functions for use inside the whole kernel. */

#include "kernel/kernel.h"

#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <machine/cmos.h>
#include <machine/bios.h>
#include <machine/cpu.h>
#include <minix/portio.h>
#include <minix/cpufeature.h>
#include <assert.h>
#include <signal.h>
#include <machine/vm.h>

#include <minix/u64.h>

#include "archconst.h"
#include "arch_proto.h"
#include "serial.h"
#include "oxpcie.h"
#include <machine/multiboot.h>

#include "glo.h"

#ifdef USE_APIC
#include "apic.h"
#endif

#ifdef USE_ACPI
#include "acpi.h"
#endif

static int osfxsr_feature; /* FXSAVE/FXRSTOR instructions support (SSEx) */

/* set MP and NE flags to handle FPU exceptions in native mode. */
#define CR0_MP_NE	0x0022
/* set CR4.OSFXSR[bit 9] if FXSR is supported. */
#define CR4_OSFXSR	(1L<<9)
/* set OSXMMEXCPT[bit 10] if we provide #XM handler. */
#define CR4_OSXMMEXCPT	(1L<<10)

void * k_stacks;

static void ser_debug(int c);
static void ser_dump_vfs(void);
#ifdef CONFIG_SMP
static void ser_dump_proc_cpu(void);
#endif
#if !CONFIG_OXPCIE
static void ser_init(void);
#endif

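/*
 * Probe for an x87 FPU without relying on CPUID. This explains the magic
 * constants below: FNINIT loads the control word with 0x37f and clears the
 * status word, so on a working FPU the low status byte reads 0 and
 * (cw & 0x103f) == 0x3f. Anything else means there is no usable FPU.
 */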
void fpu_init(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check exact model.
		 * Set CR0_NE and CR0_MP to handle fpu exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

			/* OSXMMEXCPT if supported;
			 * the FXSR feature can be available without SSE.
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT;

			write_cr4(cr4);
			osfxsr_feature = 1;
		} else {
			osfxsr_feature = 0;
		}
	} else {
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
		osfxsr_feature = 0;
		return;
	}
}

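/*
 * Note on the legacy path below: FNSAVE reinitializes the FPU as a side
 * effect of saving, which is why the state is reloaded right away with
 * FRSTOR when the caller wants it retained. FXSAVE leaves the FPU state
 * untouched, so no reload is needed on that path.
 */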
void save_local_fpu(struct proc *pr, int retain)
{
	char *state = pr->p_seg.fpu_state;

	/* Save process FPU context. If the 'retain' flag is set, keep the FPU
	 * state as is. If the flag is not set, the state is undefined upon
	 * return, and the caller is responsible for reloading a proper state.
	 */

	if(!is_fpu())
		return;

	assert(state);

	if(osfxsr_feature) {
		fxsave(state);
	} else {
		fnsave(state);
		if (retain)
			(void) frstor(state);
	}
}

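/*
 * On SMP, a process's FPU state may live in the registers of another CPU,
 * which this CPU cannot touch directly. In that case the remote CPU is
 * asked to stop the process and save its context; afterwards the process
 * may run again, since the kernel occupying its CPU keeps it off the
 * hardware until the state has been saved.
 */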
void save_fpu(struct proc *pr)
{
#ifdef CONFIG_SMP
	if (cpuid != pr->p_cpu) {
		int stopped;

		/* remember if the process was already stopped */
		stopped = RTS_ISSET(pr, RTS_PROC_STOP);

		/* stop the remote process and force its context to be saved */
		smp_schedule_stop_proc_save_ctx(pr);

		/*
		 * If the process wasn't stopped, let the process run again.
		 * The process is kept blocked by the fact that the kernel
		 * cannot run on its cpu.
		 */
		if (!stopped)
			RTS_UNSET(pr, RTS_PROC_STOP);

		return;
	}
#endif

	if (get_cpulocal_var(fpu_owner) == pr) {
		disable_fpu_exception();
		save_local_fpu(pr, TRUE /*retain*/);
	}
}

/* reserve a chunk of memory for fpu state; every one has to
 * be FPUALIGN-aligned.
 */
static char fpu_state[NR_PROCS][FPU_XFP_SIZE] __aligned(FPUALIGN);

void arch_proc_reset(struct proc *pr)
{
	char *v = NULL;
	struct stackframe_s reg;

	assert(pr->p_nr < NR_PROCS);

	if(pr->p_nr >= 0) {
		v = fpu_state[pr->p_nr];
		/* verify alignment */
		assert(!((vir_bytes)v % FPUALIGN));
		/* initialize state */
		memset(v, 0, FPU_XFP_SIZE);
	}

	/* Clear process state. */
	memset(&reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr))
		reg.psw = INIT_TASK_PSW;
	else
		reg.psw = INIT_PSW;

	pr->p_seg.fpu_state = v;

	/* Initialize the fundamentals that are (initially) the same for all
	 * processes - the segment selectors it gets to use.
	 */
	pr->p_reg.cs = USER_CS_SELECTOR;
	pr->p_reg.gs =
	pr->p_reg.fs =
	pr->p_reg.ss =
	pr->p_reg.es =
	pr->p_reg.ds = USER_DS_SELECTOR;

	/* set full context and make sure it gets restored */
	arch_proc_setcontext(pr, &reg, 0, KTS_FULLCONTEXT);
}

void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.bx = val;
}

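/*
 * Load a process's FPU context into the hardware. A process that has never
 * used the FPU gets a freshly initialized unit rather than a saved image;
 * a failing fxrstor/frstor means the saved state is invalid, which is
 * reported to the caller as EINVAL.
 */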
int restore_fpu(struct proc *pr)
{
	int failed;
	char *state = pr->p_seg.fpu_state;

	assert(state);

	if(!proc_used_fpu(pr)) {
		fninit();
		pr->p_misc_flags |= MF_FPU_INITIALIZED;
	} else {
		if(osfxsr_feature) {
			failed = fxrstor(state);
		} else {
			failed = frstor(state);
		}

		if (failed) return EINVAL;
	}

	return OK;
}

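/*
 * CPUID leaf 0 yields the vendor signature in EBX:EDX:ECX; leaf 1 packs
 * the version information into EAX as follows:
 *
 *	EAX[3:0]	stepping
 *	EAX[7:4]	model
 *	EAX[11:8]	family
 *	EAX[19:16]	extended model (prepended to the model field)
 *	EAX[27:20]	extended family (added to family when the family
 *			field reads 0xf)
 *
 * ECX and EDX hold the feature flag words that are stashed in cpu_info.
 */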
void cpu_identify(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	eax = 0;
	_cpuid(&eax, &ebx, &ecx, &edx);

	if (ebx == INTEL_CPUID_GEN_EBX && ecx == INTEL_CPUID_GEN_ECX &&
			edx == INTEL_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_INTEL;
	} else if (ebx == AMD_CPUID_GEN_EBX && ecx == AMD_CPUID_GEN_ECX &&
			edx == AMD_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_AMD;
	} else
		cpu_info[cpu].vendor = CPU_VENDOR_UNKNOWN;

	if (eax == 0)
		return;

	eax = 1;
	_cpuid(&eax, &ebx, &ecx, &edx);

	cpu_info[cpu].family = (eax >> 8) & 0xf;
	if (cpu_info[cpu].family == 0xf)
		cpu_info[cpu].family += (eax >> 20) & 0xff;
	cpu_info[cpu].model = (eax >> 4) & 0xf;
	if (cpu_info[cpu].model == 0xf || cpu_info[cpu].model == 0x6)
		cpu_info[cpu].model += ((eax >> 16) & 0xf) << 4;
	cpu_info[cpu].stepping = eax & 0xf;
	cpu_info[cpu].flags[0] = ecx;
	cpu_info[cpu].flags[1] = edx;
}

void arch_init(void)
{
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#endif

#if !CONFIG_OXPCIE
	ser_init();
#endif

#ifdef USE_ACPI
	acpi_init();
#endif

#if defined(USE_APIC) && !defined(CONFIG_SMP)
	if (config_no_apic) {
		BOOT_VERBOSE(printf("APIC disabled, using legacy PIC\n"));
	}
	else if (!apic_single_cpu_init()) {
		BOOT_VERBOSE(printf("APIC not present, using legacy PIC\n"));
	}
#endif

	/* Reserve some BIOS ranges */
	cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
	cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
}

/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug(void)
{
	u8_t c, lsr;

#if CONFIG_OXPCIE
	{
		int oxin;
		if((oxin = oxpcie_in()) >= 0)
			ser_debug(oxin);
	}
#endif

	/* Only act if a byte is waiting in the receive buffer. */
	lsr = inb(COM1_LSR);
	if (!(lsr & LSR_DR))
		return;
	c = inb(COM1_RBR);
	ser_debug(c);
}

static void ser_dump_queue_cpu(unsigned cpu)
{
	int q;
	struct proc ** rdy_head;

	rdy_head = get_cpu_var(cpu, run_q_head);

	for(q = 0; q < NR_SCHED_QUEUES; q++) {
		struct proc *p;
		if(rdy_head[q]) {
			printf("%2d: ", q);
			for(p = rdy_head[q]; p; p = p->p_nextready) {
				printf("%s / %d ", p->p_name, p->p_endpoint);
			}
			printf("\n");
		}
	}
}

static void ser_dump_queues(void)
{
#ifdef CONFIG_SMP
	unsigned cpu;

	printf("--- run queues ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d :\n", cpu);
		ser_dump_queue_cpu(cpu);
	}
#else
	ser_dump_queue_cpu(0);
#endif
}

#ifdef CONFIG_SMP
static void dump_bkl_usage(void)
{
	unsigned cpu;

	printf("--- BKL usage ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu,
				ex64hi(kernel_ticks[cpu]),
				ex64lo(kernel_ticks[cpu]),
				ex64hi(bkl_ticks[cpu]),
				ex64lo(bkl_ticks[cpu]),
				bkl_succ[cpu], bkl_tries[cpu]);
	}
}

static void reset_bkl_usage(void)
{
	memset(kernel_ticks, 0, sizeof(kernel_ticks));
	memset(bkl_ticks, 0, sizeof(bkl_ticks));
	memset(bkl_tries, 0, sizeof(bkl_tries));
	memset(bkl_succ, 0, sizeof(bkl_succ));
}
#endif

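/*
 * Single-character debug commands accepted on the serial console:
 *	Q	shut down MINIX
 *	1	dump processes
 *	2	dump run queues
 *	4	dump processes per CPU (SMP)
 *	5	ask VFS to dump stack traces
 *	B / b	dump / reset BKL statistics (SMP)
 *	8 / 9	toggle scheduling / pick-proc verbosity (DEBUG_TRACE)
 *	I	dump APIC IRQ state (APIC)
 */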
static void ser_debug(const int c)
{
	serial_debug_active = 1;

	switch(c)
	{
	case 'Q':
		minix_shutdown(NULL);
		NOT_REACHABLE;
#ifdef CONFIG_SMP
	case 'B':
		dump_bkl_usage();
		break;
	case 'b':
		reset_bkl_usage();
		break;
#endif
	case '1':
		ser_dump_proc();
		break;
	case '2':
		ser_dump_queues();
		break;
#ifdef CONFIG_SMP
	case '4':
		ser_dump_proc_cpu();
		break;
#endif
	case '5':
		ser_dump_vfs();
		break;
#if DEBUG_TRACE
#define TOGGLECASE(ch, flag)				\
	case ch: {					\
		if(verboseflags & flag) {		\
			verboseflags &= ~flag;		\
			printf("%s disabled\n", #flag);	\
		} else {				\
			verboseflags |= flag;		\
			printf("%s enabled\n", #flag);	\
		}					\
		break;					\
	}
	TOGGLECASE('8', VF_SCHEDULING)
	TOGGLECASE('9', VF_PICKPROC)
#endif
#ifdef USE_APIC
	case 'I':
		dump_apic_irq_state();
		break;
#endif
	}

	serial_debug_active = 0;
}

#if DEBUG_SERIAL

static void ser_dump_vfs(void)
{
	/* Notify VFS it has to generate stack traces. Kernel can't do that as
	 * it's not aware of user space threads.
	 */
	mini_notify(proc_addr(KERNEL), VFS_PROC_NR);
}

#ifdef CONFIG_SMP
static void ser_dump_proc_cpu(void)
{
	struct proc *pp;
	unsigned cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d processes :\n", cpu);
		for (pp = BEG_USER_ADDR; pp < END_PROC_ADDR; pp++) {
			if (isemptyp(pp) || pp->p_cpu != cpu)
				continue;
			print_proc(pp);
		}
	}
}
#endif

#endif /* DEBUG_SERIAL */

#if SPROFILE

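/*
 * The profiling clock runs on the CMOS RTC. The register protocol, as in
 * the standard PC/AT RTC: register A selects the periodic interrupt rate,
 * bit RTC_B_PIE in register B enables the periodic interrupt, and register
 * C latches the pending interrupt flags and must be read once to (re)arm
 * the interrupt line. Registers are addressed through the RTC_INDEX/RTC_IO
 * port pair.
 */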
int arch_init_profile_clock(const u32_t freq)
{
	int r;
	/* Set CMOS timer frequency. */
	outb(RTC_INDEX, RTC_REG_A);
	outb(RTC_IO, RTC_A_DV_OK | freq);
	/* Enable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r | RTC_B_PIE);
	/* Mandatory read of CMOS register to enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);

	return CMOS_CLOCK_IRQ;
}

void arch_stop_profile_clock(void)
{
	int r;
	/* Disable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r & ~RTC_B_PIE);
}

void arch_ack_profile_clock(void)
{
	/* Mandatory read of CMOS register to re-enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);
}

#endif

void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	assert(proc->p_misc_flags & MF_SC_DEFER);
	proc->p_reg.retreg =
		do_ipc(proc->p_defer.r1, proc->p_defer.r2, proc->p_defer.r3);
}

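/*
 * Last stop before returning to user space. The TSS's sp0 field points at
 * the top of this CPU's kernel stack; the word stored there is what the
 * low-level kernel entry code uses to locate the current process, so it is
 * refreshed on every switch.
 */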
struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

	/* make sure IF is on in FLAGS so that interrupts won't be disabled
	 * once p's context is restored.
	 */
	p->p_reg.psw |= IF_MASK;

	/* Set TRACEBIT state properly. */
	if(p->p_misc_flags & MF_STEP)
		p->p_reg.psw |= TRACEBIT;
	else
		p->p_reg.psw &= ~TRACEBIT;

	return p;
}

void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trap_style)
{
	if(isuser) {
		/* Restore user bits of psw from sc, maintain system bits
		 * from proc.
		 */
		state->psw = (state->psw & X86_FLAGS_USER) |
			(p->p_reg.psw & ~X86_FLAGS_USER);
	}

	/* someone wants to totally re-initialize process state */
	assert(sizeof(p->p_reg) == sizeof(*state));
	if(state != &p->p_reg) {
		memcpy(&p->p_reg, state, sizeof(*state));
	}

	/* further code is instructed to not touch the context
	 * any more
	 */
	p->p_misc_flags |= MF_CONTEXT_SET;

	/* on x86 this requires returning using iret (KTS_INT)
	 * so that the full context is restored instead of relying on
	 * the userspace doing it (as it would do on SYSEXIT).
	 * as ESP and EIP are also reset, userspace won't try to
	 * restore bogus context after returning.
	 *
	 * if the process is not blocked, or the kernel will ignore
	 * our trap style, we needn't panic but things will probably
	 * not go well for the process (restored context will be ignored)
	 * and the situation should be debugged.
	 */
	if(!(p->p_rts_flags)) {
		printf("WARNING: setting full context of runnable process\n");
		print_proc(p);
		util_stacktrace();
	}
	if(p->p_seg.p_kern_trap_style == KTS_NONE)
		printf("WARNING: setting full context of out-of-kernel process\n");
	p->p_seg.p_kern_trap_style = trap_style;
}

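/*
 * Return to user space along the same path the process entered the kernel
 * on: SYSENTER and SYSCALL get their dedicated fast exits, and everything
 * that arrived through an interrupt gate (or had its full context set)
 * leaves via iret, the only exit that restores the complete register
 * state. The recorded trap style is consumed here and reset to KTS_NONE so
 * a stale value can never be reused.
 */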
void restore_user_context(struct proc *p)
{
	int trap_style = p->p_seg.p_kern_trap_style;
#if 0
	/* Gather per-trap-style restore counts and print them occasionally. */
#define TYPES 10
	static int restores[TYPES], n = 0;

	if(trap_style >= 0 && trap_style < TYPES)
		restores[trap_style]++;

	if(!(n++ % 500000)) {
		int t;
		for(t = 0; t < TYPES; t++)
			if(restores[t])
				printf("%d: %d ", t, restores[t]);
		printf("\n");
	}
#endif

	p->p_seg.p_kern_trap_style = KTS_NONE;

	if(trap_style == KTS_SYSENTER) {
		restore_user_context_sysenter(p);
		NOT_REACHABLE;
	}

	if(trap_style == KTS_SYSCALL) {
		restore_user_context_syscall(p);
		NOT_REACHABLE;
	}

	switch(trap_style) {
		case KTS_NONE:
			panic("no entry trap style known");
		case KTS_INT_HARD:
		case KTS_INT_UM:
		case KTS_FULLCONTEXT:
		case KTS_INT_ORIG:
			restore_user_context_int(p);
			NOT_REACHABLE;
		default:
			panic("unknown trap style recorded");
			NOT_REACHABLE;
	}

	NOT_REACHABLE;
}

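/*
 * Map an x87 exception to a SIGFPE code for the signal frame. The relevant
 * status word bits, after masking out the exceptions the control word has
 * disabled, are:
 *
 *	0x01	IE	invalid operation
 *	0x02	DE	denormalized operand
 *	0x04	ZE	zero divide
 *	0x08	OE	overflow
 *	0x10	UE	underflow
 *	0x20	PE	precision loss
 */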
void fpu_sigcontext(struct proc *pr, struct sigframe_sigcontext *fr,
	struct sigcontext *sc)
{
	int fp_error;

	if (osfxsr_feature) {
		fp_error = sc->sc_fpu_state.xfp_regs.fp_status &
			~sc->sc_fpu_state.xfp_regs.fp_control;
	} else {
		fp_error = sc->sc_fpu_state.fpu_regs.fp_status &
			~sc->sc_fpu_state.fpu_regs.fp_control;
	}

	if (fp_error & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		fr->sf_code = FPE_FLTINV;
	} else if (fp_error & 0x004) {
		fr->sf_code = FPE_FLTDIV;	/* Divide by Zero */
	} else if (fp_error & 0x008) {
		fr->sf_code = FPE_FLTOVF;	/* Overflow */
	} else if (fp_error & 0x012) {
		fr->sf_code = FPE_FLTUND;	/* Denormal, Underflow */
	} else if (fp_error & 0x020) {
		fr->sf_code = FPE_FLTRES;	/* Precision */
	} else {
		fr->sf_code = 0;	/* XXX - probably should be used for
					 * FPE_INTOVF or FPE_INTDIV */
	}
}

reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }

#if !CONFIG_OXPCIE
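/*
 * Program COM1 for kernel debug output, assuming a 16550-compatible UART:
 * setting the DLAB bit in the line control register maps the divisor latch
 * onto the DLL/DLM ports, and the baud rate is UART_BASE_FREQ divided by
 * the 16-bit divisor programmed there (e.g. 115200 baud needs divisor 1
 * with the usual 115200 Hz base clock).
 */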
static void ser_init(void)
{
	unsigned char lcr;
	unsigned divisor;

	/* keep BIOS settings if cttybaud is not set */
	if (kinfo.serial_debug_baud <= 0) return;

	/* set DLAB to make baud accessible */
	lcr = LCR_8BIT | LCR_1STOP | LCR_NPAR;
	outb(COM1_LCR, lcr | LCR_DLAB);

	/* set baud rate */
	divisor = UART_BASE_FREQ / kinfo.serial_debug_baud;
	if (divisor < 1) divisor = 1;
	if (divisor > 65535) divisor = 65535;

	outb(COM1_DLL, divisor & 0xff);
	outb(COM1_DLM, (divisor >> 8) & 0xff);

	/* clear DLAB */
	outb(COM1_LCR, lcr);
}
#endif