/*
 * minix.git: kernel/arch/i386/arch_system.c
 * (blob 2296bf835993a018d526c31e11fe7adf39f8c369; commit: "coverity appeasement")
 */
1 /* system dependent functions for use inside the whole kernel. */
3 #include "kernel/kernel.h"
5 #include <unistd.h>
6 #include <ctype.h>
7 #include <string.h>
8 #include <machine/cmos.h>
9 #include <machine/bios.h>
10 #include <minix/portio.h>
11 #include <minix/cpufeature.h>
12 #include <assert.h>
13 #include <signal.h>
14 #include <machine/vm.h>
16 #include <minix/u64.h>
18 #include "archconst.h"
19 #include "arch_proto.h"
20 #include "serial.h"
21 #include "oxpcie.h"
22 #include "direct_utils.h"
23 #include <machine/multiboot.h>
25 #include "glo.h"
27 #ifdef USE_APIC
28 #include "apic.h"
29 #endif
31 #ifdef USE_ACPI
32 #include "acpi.h"
33 #endif
35 static int osfxsr_feature; /* FXSAVE/FXRSTOR instructions support (SSEx) */
37 /* set MP and NE flags to handle FPU exceptions in native mode. */
38 #define CR0_MP_NE 0x0022
39 /* set CR4.OSFXSR[bit 9] if FXSR is supported. */
40 #define CR4_OSFXSR (1L<<9)
41 /* set OSXMMEXCPT[bit 10] if we provide #XM handler. */
42 #define CR4_OSXMMEXCPT (1L<<10)
44 void * k_stacks;
46 static void ser_debug(int c);
47 #ifdef CONFIG_SMP
48 static void ser_dump_proc_cpu(void);
49 #endif
50 #if !CONFIG_OXPCIE
51 static void ser_init(void);
52 #endif
54 void fpu_init(void)
56 unsigned short cw, sw;
58 fninit();
59 sw = fnstsw();
60 fnstcw(&cw);
62 if((sw & 0xff) == 0 &&
63 (cw & 0x103f) == 0x3f) {
64 /* We have some sort of FPU, but don't check exact model.
65 * Set CR0_NE and CR0_MP to handle fpu exceptions
66 * in native mode. */
67 write_cr0(read_cr0() | CR0_MP_NE);
68 get_cpulocal_var(fpu_presence) = 1;
69 if(_cpufeature(_CPUF_I386_FXSR)) {
70 u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */
72 /* OSXMMEXCPT if supported
73 * FXSR feature can be available without SSE
75 if(_cpufeature(_CPUF_I386_SSE))
76 cr4 |= CR4_OSXMMEXCPT;
78 write_cr4(cr4);
79 osfxsr_feature = 1;
80 } else {
81 osfxsr_feature = 0;
83 } else {
84 /* No FPU presents. */
85 get_cpulocal_var(fpu_presence) = 0;
86 osfxsr_feature = 0;
87 return;
91 void save_local_fpu(struct proc *pr, int retain)
93 char *state = pr->p_seg.fpu_state;
95 /* Save process FPU context. If the 'retain' flag is set, keep the FPU
96 * state as is. If the flag is not set, the state is undefined upon
97 * return, and the caller is responsible for reloading a proper state.
100 if(!is_fpu())
101 return;
103 assert(state);
105 if(osfxsr_feature) {
106 fxsave(state);
107 } else {
108 fnsave(state);
109 if (retain)
110 (void) frstor(state);
114 void save_fpu(struct proc *pr)
116 #ifdef CONFIG_SMP
117 if (cpuid != pr->p_cpu) {
118 int stopped;
120 /* remember if the process was already stopped */
121 stopped = RTS_ISSET(pr, RTS_PROC_STOP);
123 /* stop the remote process and force its context to be saved */
124 smp_schedule_stop_proc_save_ctx(pr);
127 * If the process wasn't stopped let the process run again. The
128 * process is kept block by the fact that the kernel cannot run
129 * on its cpu
131 if (!stopped)
132 RTS_UNSET(pr, RTS_PROC_STOP);
134 return;
136 #endif
138 if (get_cpulocal_var(fpu_owner) == pr) {
139 disable_fpu_exception();
140 save_local_fpu(pr, TRUE /*retain*/);
144 /* reserve a chunk of memory for fpu state; every one has to
145 * be FPUALIGN-aligned.
147 static char fpu_state[NR_PROCS][FPU_XFP_SIZE] __aligned(FPUALIGN);
149 void arch_proc_reset(struct proc *pr)
151 char *v = NULL;
153 assert(pr->p_nr < NR_PROCS);
155 if(pr->p_nr >= 0) {
156 v = fpu_state[pr->p_nr];
157 /* verify alignment */
158 assert(!((vir_bytes)v % FPUALIGN));
159 /* initialize state */
160 memset(v, 0, FPU_XFP_SIZE);
163 /* Clear process state. */
164 memset(&pr->p_reg, 0, sizeof(pr->p_reg));
165 if(iskerneln(pr->p_nr))
166 pr->p_reg.psw = INIT_TASK_PSW;
167 else
168 pr->p_reg.psw = INIT_PSW;
170 pr->p_seg.fpu_state = v;
172 /* Initialize the fundamentals that are (initially) the same for all
173 * processes - the segment selectors it gets to use.
175 pr->p_reg.cs = USER_CS_SELECTOR;
176 pr->p_reg.gs =
177 pr->p_reg.fs =
178 pr->p_reg.ss =
179 pr->p_reg.es =
180 pr->p_reg.ds = USER_DS_SELECTOR;
183 void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
185 p->p_reg.bx = val;
188 int restore_fpu(struct proc *pr)
190 int failed;
191 char *state = pr->p_seg.fpu_state;
193 assert(state);
195 if(!proc_used_fpu(pr)) {
196 fninit();
197 pr->p_misc_flags |= MF_FPU_INITIALIZED;
198 } else {
199 if(osfxsr_feature) {
200 failed = fxrstor(state);
201 } else {
202 failed = frstor(state);
205 if (failed) return EINVAL;
208 return OK;
211 void cpu_identify(void)
213 u32_t eax, ebx, ecx, edx;
214 unsigned cpu = cpuid;
216 eax = 0;
217 _cpuid(&eax, &ebx, &ecx, &edx);
219 if (ebx == INTEL_CPUID_GEN_EBX && ecx == INTEL_CPUID_GEN_ECX &&
220 edx == INTEL_CPUID_GEN_EDX) {
221 cpu_info[cpu].vendor = CPU_VENDOR_INTEL;
222 } else if (ebx == AMD_CPUID_GEN_EBX && ecx == AMD_CPUID_GEN_ECX &&
223 edx == AMD_CPUID_GEN_EDX) {
224 cpu_info[cpu].vendor = CPU_VENDOR_AMD;
225 } else
226 cpu_info[cpu].vendor = CPU_VENDOR_UNKNOWN;
228 if (eax == 0)
229 return;
231 eax = 1;
232 _cpuid(&eax, &ebx, &ecx, &edx);
234 cpu_info[cpu].family = (eax >> 8) & 0xf;
235 if (cpu_info[cpu].family == 0xf)
236 cpu_info[cpu].family += (eax >> 20) & 0xff;
237 cpu_info[cpu].model = (eax >> 4) & 0xf;
238 if (cpu_info[cpu].model == 0xf || cpu_info[cpu].model == 0x6)
239 cpu_info[cpu].model += ((eax >> 16) & 0xf) << 4 ;
240 cpu_info[cpu].stepping = eax & 0xf;
241 cpu_info[cpu].flags[0] = ecx;
242 cpu_info[cpu].flags[1] = edx;
245 void arch_init(void)
247 k_stacks = (void*) &k_stacks_start;
248 assert(!((vir_bytes) k_stacks % K_STACK_SIZE));
250 #ifndef CONFIG_SMP
252 * use stack 0 and cpu id 0 on a single processor machine, SMP
253 * configuration does this in smp_init() for all cpus at once
255 tss_init(0, get_k_stack_top(0));
256 #endif
258 #if !CONFIG_OXPCIE
259 ser_init();
260 #endif
262 #ifdef USE_ACPI
263 acpi_init();
264 #endif
266 #if defined(USE_APIC) && !defined(CONFIG_SMP)
267 if (config_no_apic) {
268 BOOT_VERBOSE(printf("APIC disabled, using legacy PIC\n"));
270 else if (!apic_single_cpu_init()) {
271 BOOT_VERBOSE(printf("APIC not present, using legacy PIC\n"));
273 #endif
275 /* Reserve some BIOS ranges */
276 cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
277 cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
280 /*===========================================================================*
281 * do_ser_debug *
282 *===========================================================================*/
283 void do_ser_debug()
285 u8_t c, lsr;
287 #if CONFIG_OXPCIE
289 int oxin;
290 if((oxin = oxpcie_in()) >= 0)
291 ser_debug(oxin);
293 #endif
295 lsr= inb(COM1_LSR);
296 if (!(lsr & LSR_DR))
297 return;
298 c = inb(COM1_RBR);
299 ser_debug(c);
302 static void ser_dump_queue_cpu(unsigned cpu)
304 int q;
305 struct proc ** rdy_head;
307 rdy_head = get_cpu_var(cpu, run_q_head);
309 for(q = 0; q < NR_SCHED_QUEUES; q++) {
310 struct proc *p;
311 if(rdy_head[q]) {
312 printf("%2d: ", q);
313 for(p = rdy_head[q]; p; p = p->p_nextready) {
314 printf("%s / %d ", p->p_name, p->p_endpoint);
316 printf("\n");
/* Dump the run queues of every CPU (all of them on SMP, CPU 0 otherwise). */
static void ser_dump_queues(void)
{
#ifdef CONFIG_SMP
	unsigned cpu;

	printf("--- run queues ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d :\n", cpu);
		ser_dump_queue_cpu(cpu);
	}
#else
	ser_dump_queue_cpu(0);
#endif
}
336 #ifdef CONFIG_SMP
337 static void dump_bkl_usage(void)
339 unsigned cpu;
341 printf("--- BKL usage ---\n");
342 for (cpu = 0; cpu < ncpus; cpu++) {
343 printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu,
344 ex64hi(kernel_ticks[cpu]),
345 ex64lo(kernel_ticks[cpu]),
346 ex64hi(bkl_ticks[cpu]),
347 ex64lo(bkl_ticks[cpu]),
348 bkl_succ[cpu], bkl_tries[cpu]);
352 static void reset_bkl_usage(void)
354 memset(kernel_ticks, 0, sizeof(kernel_ticks));
355 memset(bkl_ticks, 0, sizeof(bkl_ticks));
356 memset(bkl_tries, 0, sizeof(bkl_tries));
357 memset(bkl_succ, 0, sizeof(bkl_succ));
359 #endif
361 static void ser_debug(const int c)
363 serial_debug_active = 1;
365 switch(c)
367 case 'Q':
368 minix_shutdown(NULL);
369 NOT_REACHABLE;
370 #ifdef CONFIG_SMP
371 case 'B':
372 dump_bkl_usage();
373 break;
374 case 'b':
375 reset_bkl_usage();
376 break;
377 #endif
378 case '1':
379 ser_dump_proc();
380 break;
381 case '2':
382 ser_dump_queues();
383 break;
384 #ifdef CONFIG_SMP
385 case '4':
386 ser_dump_proc_cpu();
387 break;
388 #endif
389 #if DEBUG_TRACE
390 #define TOGGLECASE(ch, flag) \
391 case ch: { \
392 if(verboseflags & flag) { \
393 verboseflags &= ~flag; \
394 printf("%s disabled\n", #flag); \
395 } else { \
396 verboseflags |= flag; \
397 printf("%s enabled\n", #flag); \
399 break; \
401 TOGGLECASE('8', VF_SCHEDULING)
402 TOGGLECASE('9', VF_PICKPROC)
403 #endif
404 #ifdef USE_APIC
405 case 'I':
406 dump_apic_irq_state();
407 break;
408 #endif
410 serial_debug_active = 0;
413 #if DEBUG_SERIAL
414 void ser_dump_proc()
416 struct proc *pp;
418 for (pp= BEG_PROC_ADDR; pp < END_PROC_ADDR; pp++)
420 if (isemptyp(pp))
421 continue;
422 print_proc_recursive(pp);
426 #ifdef CONFIG_SMP
427 static void ser_dump_proc_cpu(void)
429 struct proc *pp;
430 unsigned cpu;
432 for (cpu = 0; cpu < ncpus; cpu++) {
433 printf("CPU %d processes : \n", cpu);
434 for (pp= BEG_USER_ADDR; pp < END_PROC_ADDR; pp++) {
435 if (isemptyp(pp) || pp->p_cpu != cpu)
436 continue;
437 print_proc(pp);
441 #endif
443 #endif /* DEBUG_SERIAL */
445 #if SPROFILE
447 int arch_init_profile_clock(const u32_t freq)
449 int r;
450 /* Set CMOS timer frequency. */
451 outb(RTC_INDEX, RTC_REG_A);
452 outb(RTC_IO, RTC_A_DV_OK | freq);
453 /* Enable CMOS timer interrupts. */
454 outb(RTC_INDEX, RTC_REG_B);
455 r = inb(RTC_IO);
456 outb(RTC_INDEX, RTC_REG_B);
457 outb(RTC_IO, r | RTC_B_PIE);
458 /* Mandatory read of CMOS register to enable timer interrupts. */
459 outb(RTC_INDEX, RTC_REG_C);
460 inb(RTC_IO);
462 return CMOS_CLOCK_IRQ;
465 void arch_stop_profile_clock(void)
467 int r;
468 /* Disable CMOS timer interrupts. */
469 outb(RTC_INDEX, RTC_REG_B);
470 r = inb(RTC_IO);
471 outb(RTC_INDEX, RTC_REG_B);
472 outb(RTC_IO, r & ~RTC_B_PIE);
475 void arch_ack_profile_clock(void)
477 /* Mandatory read of CMOS register to re-enable timer interrupts. */
478 outb(RTC_INDEX, RTC_REG_C);
479 inb(RTC_IO);
482 #endif
484 void arch_do_syscall(struct proc *proc)
486 /* do_ipc assumes that it's running because of the current process */
487 assert(proc == get_cpulocal_var(proc_ptr));
488 /* Make the system call, for real this time. */
489 proc->p_reg.retreg =
490 do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
493 struct proc * arch_finish_switch_to_user(void)
495 char * stk;
496 struct proc * p;
498 #ifdef CONFIG_SMP
499 stk = (char *)tss[cpuid].sp0;
500 #else
501 stk = (char *)tss[0].sp0;
502 #endif
503 /* set pointer to the process to run on the stack */
504 p = get_cpulocal_var(proc_ptr);
505 *((reg_t *)stk) = (reg_t) p;
507 /* make sure IF is on in FLAGS so that interrupts won't be disabled
508 * once p's context is restored. this should not be possible.
510 assert(p->p_reg.psw & (1L << 9));
512 return p;
515 void fpu_sigcontext(struct proc *pr, struct sigframe *fr, struct sigcontext *sc)
517 int fp_error;
519 if (osfxsr_feature) {
520 fp_error = sc->sc_fpu_state.xfp_regs.fp_status &
521 ~sc->sc_fpu_state.xfp_regs.fp_control;
522 } else {
523 fp_error = sc->sc_fpu_state.fpu_regs.fp_status &
524 ~sc->sc_fpu_state.fpu_regs.fp_control;
527 if (fp_error & 0x001) { /* Invalid op */
529 * swd & 0x240 == 0x040: Stack Underflow
530 * swd & 0x240 == 0x240: Stack Overflow
531 * User must clear the SF bit (0x40) if set
533 fr->sf_code = FPE_FLTINV;
534 } else if (fp_error & 0x004) {
535 fr->sf_code = FPE_FLTDIV; /* Divide by Zero */
536 } else if (fp_error & 0x008) {
537 fr->sf_code = FPE_FLTOVF; /* Overflow */
538 } else if (fp_error & 0x012) {
539 fr->sf_code = FPE_FLTUND; /* Denormal, Underflow */
540 } else if (fp_error & 0x020) {
541 fr->sf_code = FPE_FLTRES; /* Precision */
542 } else {
543 fr->sf_code = 0; /* XXX - probably should be used for FPE_INTOVF or
544 * FPE_INTDIV */
548 reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }
550 #if !CONFIG_OXPCIE
551 static void ser_init(void)
553 unsigned char lcr;
554 unsigned divisor;
556 /* keep BIOS settings if cttybaud is not set */
557 if (kinfo.serial_debug_baud <= 0) return;
559 /* set DLAB to make baud accessible */
560 lcr = LCR_8BIT | LCR_1STOP | LCR_NPAR;
561 outb(COM1_LCR, lcr | LCR_DLAB);
563 /* set baud rate */
564 divisor = UART_BASE_FREQ / kinfo.serial_debug_baud;
565 if (divisor < 1) divisor = 1;
566 if (divisor > 65535) divisor = 65535;
568 outb(COM1_DLL, divisor & 0xff);
569 outb(COM1_DLM, (divisor >> 8) & 0xff);
571 /* clear DLAB */
572 outb(COM1_LCR, lcr);
574 #endif