/* system dependent functions for use inside the whole kernel. */

#include <machine/cmos.h>
#include <machine/bios.h>
#include <machine/cpu.h>
#include <minix/portio.h>
#include <minix/cpufeature.h>
#include <machine/vm.h>
#include <minix/u64.h>

#include "archconst.h"
static int osfxsr_feature; /* FXSAVE/FXRSTOR instructions support (SSEx) */
/* set MP and NE flags to handle FPU exceptions in native mode. */
#define CR0_MP_NE	0x0022
/* set CR4.OSFXSR[bit 9] if FXSR is supported. */
#define CR4_OSFXSR	(1L<<9)
/* set OSXMMEXCPT[bit 10] if we provide #XM handler. */
#define CR4_OSXMMEXCPT	(1L<<10)
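
/* Note: CR0.NE selects native FPU error reporting (exception 16) instead
 * of the legacy IRQ13 route, and CR0.MP makes WAIT/FWAIT honor CR0.TS,
 * which is what lets lazy FPU context switching trap on first use. */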
static void ser_debug(int c);
static void ser_dump_vfs(void);
static void ser_dump_proc_cpu(void);
static void ser_init(void);
	unsigned short cw, sw;
	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check exact model.
		 * Set CR0_NE and CR0_MP to handle fpu exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */
			/* OSXMMEXCPT if supported
			 * FXSR feature can be available without SSE
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT;
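			/* Note: without CR4.OSXMMEXCPT, an unmasked SSE
			 * exception is delivered as #UD rather than #XM,
			 * so the bit is only set once a #XM handler is
			 * in place. */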
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
void save_local_fpu(struct proc *pr, int retain)
	char *state = pr->p_seg.fpu_state;

	/* Save process FPU context. If the 'retain' flag is set, keep the FPU
	 * state as is. If the flag is not set, the state is undefined upon
	 * return, and the caller is responsible for reloading a proper state.
	 */
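	/* Note: fnsave re-initializes the FPU as a side effect, so when
	 * 'retain' is set the just-saved image is loaded straight back to
	 * leave the register state unchanged. */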
		(void) frstor(state);
void save_fpu(struct proc *pr)
	if (cpuid != pr->p_cpu) {
		/* remember if the process was already stopped */
		stopped = RTS_ISSET(pr, RTS_PROC_STOP);

		/* stop the remote process and force its context to be saved */
		smp_schedule_stop_proc_save_ctx(pr);
		/* If the process wasn't stopped let the process run again.
		 * The process is kept blocked by the fact that the kernel
		 * cannot run on its cpu. */
		if (!stopped)
			RTS_UNSET(pr, RTS_PROC_STOP);
	if (get_cpulocal_var(fpu_owner) == pr) {
		disable_fpu_exception();
		save_local_fpu(pr, TRUE /*retain*/);
/* reserve a chunk of memory for fpu state; every one has to
 * be FPUALIGN-aligned.
 */
static char fpu_state[NR_PROCS][FPU_XFP_SIZE] __aligned(FPUALIGN);
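/* Note: FXSAVE/FXRSTOR fault on a save area that is not 16-byte aligned;
 * the __aligned(FPUALIGN) attribute on the array above exists to satisfy
 * that requirement for every per-process slot. */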
void arch_proc_reset(struct proc *pr)

	struct stackframe_s reg;
	assert(pr->p_nr < NR_PROCS);

	v = fpu_state[pr->p_nr];
	/* verify alignment */
	assert(!((vir_bytes)v % FPUALIGN));
	/* initialize state */
	memset(v, 0, FPU_XFP_SIZE);
	/* Clear process state. */
	memset(&reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr))
		reg.psw = INIT_TASK_PSW;
	pr->p_seg.fpu_state = v;
	/* Initialize the fundamentals that are (initially) the same for all
	 * processes - the segment selectors it gets to use.
	 */
	pr->p_reg.cs = USER_CS_SELECTOR;
	pr->p_reg.ds = USER_DS_SELECTOR;
	/* set full context and make sure it gets restored */
	arch_proc_setcontext(pr, &reg, 0, KTS_FULLCONTEXT);
void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
int restore_fpu(struct proc *pr)

	char *state = pr->p_seg.fpu_state;
	if(!proc_used_fpu(pr)) {
		pr->p_misc_flags |= MF_FPU_INITIALIZED;
		if(osfxsr_feature)
			failed = fxrstor(state);
		else
			failed = frstor(state);

		if (failed) return EINVAL;
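	/* Note: a corrupt state image makes FXRSTOR fault; presumably the
	 * kernel's fault-catching machinery turns that into the nonzero
	 * return checked above, so a bad sigreturn image costs the caller
	 * EINVAL rather than a kernel crash. */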
void cpu_identify(void)

	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;
	_cpuid(&eax, &ebx, &ecx, &edx);
	if (ebx == INTEL_CPUID_GEN_EBX && ecx == INTEL_CPUID_GEN_ECX &&
			edx == INTEL_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_INTEL;
	} else if (ebx == AMD_CPUID_GEN_EBX && ecx == AMD_CPUID_GEN_ECX &&
			edx == AMD_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_AMD;
	} else
		cpu_info[cpu].vendor = CPU_VENDOR_UNKNOWN;
	_cpuid(&eax, &ebx, &ecx, &edx);
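	/* CPUID leaf 1 returns in EAX: stepping in bits 3:0, model in 7:4,
	 * family in 11:8, extended model in 19:16 and extended family in
	 * 27:20; the extended fields only matter when the base field is
	 * saturated, which is what the checks below approximate. */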
	cpu_info[cpu].family = (eax >> 8) & 0xf;
	if (cpu_info[cpu].family == 0xf)
		cpu_info[cpu].family += (eax >> 20) & 0xff;
	cpu_info[cpu].model = (eax >> 4) & 0xf;
	if (cpu_info[cpu].model == 0xf || cpu_info[cpu].model == 0x6)
		cpu_info[cpu].model += ((eax >> 16) & 0xf) << 4;
	cpu_info[cpu].stepping = eax & 0xf;
	cpu_info[cpu].flags[0] = ecx;
	cpu_info[cpu].flags[1] = edx;
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));
	/* use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#if defined(USE_APIC) && !defined(CONFIG_SMP)
	if (config_no_apic) {
		DEBUGBASIC(("APIC disabled, using legacy PIC\n"));
	}
	else if (!apic_single_cpu_init()) {
		DEBUGBASIC(("APIC not present, using legacy PIC\n"));
	/* Reserve some BIOS ranges */
	cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
	cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
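	/* Cutting these ranges out of the memory map keeps BIOS data and
	 * the VGA/option-ROM hole from ever being handed out as ordinary
	 * RAM. */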
/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug(void)
	if((oxin = oxpcie_in()) >= 0)
static void ser_dump_queue_cpu(unsigned cpu)

	struct proc ** rdy_head;
	rdy_head = get_cpu_var(cpu, run_q_head);
	for(q = 0; q < NR_SCHED_QUEUES; q++) {
		for(p = rdy_head[q]; p; p = p->p_nextready) {
			printf("%s / %d ", p->p_name, p->p_endpoint);
static void ser_dump_queues(void)

#ifdef CONFIG_SMP
	printf("--- run queues ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d :\n", cpu);
		ser_dump_queue_cpu(cpu);
	}
#else
	ser_dump_queue_cpu(0);
#endif
static void dump_bkl_usage(void)

	printf("--- BKL usage ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu,
				ex64hi(kernel_ticks[cpu]),
				ex64lo(kernel_ticks[cpu]),
				ex64hi(bkl_ticks[cpu]),
				ex64lo(bkl_ticks[cpu]),
				bkl_succ[cpu], bkl_tries[cpu]);
static void reset_bkl_usage(void)

	memset(kernel_ticks, 0, sizeof(kernel_ticks));
	memset(bkl_ticks, 0, sizeof(bkl_ticks));
	memset(bkl_tries, 0, sizeof(bkl_tries));
	memset(bkl_succ, 0, sizeof(bkl_succ));
static void ser_debug(const int c)

	serial_debug_active = 1;
#define TOGGLECASE(ch, flag) \
	if(verboseflags & flag) { \
		verboseflags &= ~flag; \
		printf("%s disabled\n", #flag); \
	} else { \
		verboseflags |= flag; \
		printf("%s enabled\n", #flag); \
	TOGGLECASE('8', VF_SCHEDULING)
	TOGGLECASE('9', VF_PICKPROC)

	dump_apic_irq_state();

	serial_debug_active = 0;
static void ser_dump_vfs(void)

	/* Notify VFS it has to generate stack traces. Kernel can't do that as
	 * it's not aware of user space threads.
	 */
	mini_notify(proc_addr(KERNEL), VFS_PROC_NR);
static void ser_dump_proc_cpu(void)

	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d processes : \n", cpu);
		for (pp = BEG_USER_ADDR; pp < END_PROC_ADDR; pp++) {
			if (isemptyp(pp) || pp->p_cpu != cpu)
#endif /* DEBUG_SERIAL */
int arch_init_profile_clock(const u32_t freq)

	/* Set CMOS timer frequency. */
	outb(RTC_INDEX, RTC_REG_A);
	outb(RTC_IO, RTC_A_DV_OK | freq);
	/* Enable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r | RTC_B_PIE);
	/* Mandatory read of CMOS register to enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	return CMOS_CLOCK_IRQ;
void arch_stop_profile_clock(void)

	/* Disable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r & ~RTC_B_PIE);
void arch_ack_profile_clock(void)

	/* Mandatory read of CMOS register to re-enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
void arch_do_syscall(struct proc *proc)

	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	assert(proc->p_misc_flags & MF_SC_DEFER);
	do_ipc(proc->p_defer.r1, proc->p_defer.r2, proc->p_defer.r3);
struct proc * arch_finish_switch_to_user(void)

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;
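	/* Note: planting the pointer at the top of the kernel stack lets
	 * the low-level trap-entry code recover the current process without
	 * any further lookup. */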
	/* make sure IF is on in FLAGS so that interrupts won't be disabled
	 * once p's context is restored.
	 */
	p->p_reg.psw |= IF_MASK;
	/* Set TRACEBIT state properly. */
	if(p->p_misc_flags & MF_STEP)
		p->p_reg.psw |= TRACEBIT;
	else
		p->p_reg.psw &= ~TRACEBIT;
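	/* Note: TRACEBIT is the x86 TF flag; with it set the CPU raises a
	 * debug exception after one userspace instruction, which is how
	 * MF_STEP implements single-stepping. */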
void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trap_style)
		/* Restore user bits of psw from sc, maintain system bits
		 * from proc.
		 */
		state->psw = (state->psw & X86_FLAGS_USER) |
			(p->p_reg.psw & ~X86_FLAGS_USER);
	/* someone wants to totally re-initialize process state */
	assert(sizeof(p->p_reg) == sizeof(*state));
	if(state != &p->p_reg) {
		memcpy(&p->p_reg, state, sizeof(*state));
	/* further code is instructed not to touch the context
	 * any more. */
	p->p_misc_flags |= MF_CONTEXT_SET;
	/* on x86 this requires returning using iret (KTS_INT)
	 * so that the full context is restored instead of relying on
	 * the userspace doing it (as it would do on SYSEXIT).
	 * as ESP and EIP are also reset, userspace won't try to
	 * restore bogus context after returning.
	 *
	 * if the process is not blocked, or the kernel will ignore
	 * our trap style, we needn't panic but things will probably
	 * not go well for the process (restored context will be ignored)
	 * and the situation should be debugged.
	 */
	if(!(p->p_rts_flags)) {
		printf("WARNING: setting full context of runnable process\n");
	if(p->p_seg.p_kern_trap_style == KTS_NONE)
		printf("WARNING: setting full context of out-of-kernel process\n");
	p->p_seg.p_kern_trap_style = trap_style;
void restore_user_context(struct proc *p)

	int trap_style = p->p_seg.p_kern_trap_style;
	static int restores[TYPES], n = 0;

	if(trap_style >= 0 && trap_style < TYPES)
		restores[trap_style]++;

	if(!(n++ % 500000)) {
		for(t = 0; t < TYPES; t++)
			printf("%d: %d ", t, restores[t]);
	p->p_seg.p_kern_trap_style = KTS_NONE;
	if(trap_style == KTS_SYSENTER) {
		restore_user_context_sysenter(p);
	}

	if(trap_style == KTS_SYSCALL) {
		restore_user_context_syscall(p);
	}
599 panic("no entry trap style known");
602 case KTS_FULLCONTEXT
:
604 restore_user_context_int(p
);
607 panic("unknown trap style recorded");
void fpu_sigcontext(struct proc *pr, struct sigframe_sigcontext *fr, struct sigcontext *sc)
	if (osfxsr_feature) {
		fp_error = sc->sc_fpu_state.xfp_regs.fp_status &
			~sc->sc_fpu_state.xfp_regs.fp_control;
	} else {
		fp_error = sc->sc_fpu_state.fpu_regs.fp_status &
			~sc->sc_fpu_state.fpu_regs.fp_control;
	}
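	/* Note: the control word holds the exception mask bits, so the
	 * status & ~control computed above leaves exactly the unmasked
	 * exception flags, i.e. the condition that raised this fault. */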
	if (fp_error & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		fr->sf_code = FPE_FLTINV;
	} else if (fp_error & 0x004) {
		fr->sf_code = FPE_FLTDIV; /* Divide by Zero */
	} else if (fp_error & 0x008) {
		fr->sf_code = FPE_FLTOVF; /* Overflow */
	} else if (fp_error & 0x012) {
		fr->sf_code = FPE_FLTUND; /* Denormal, Underflow */
	} else if (fp_error & 0x020) {
		fr->sf_code = FPE_FLTRES; /* Precision */
	} else {
		fr->sf_code = 0;	/* XXX - probably should be used for FPE_INTOVF or
					 * FPE_INTDIV */
	}
reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }
static void ser_init(void)

	/* keep BIOS settings if cttybaud is not set */
	if (kinfo.serial_debug_baud <= 0) return;
	/* set DLAB to make baud accessible */
	lcr = LCR_8BIT | LCR_1STOP | LCR_NPAR;
	outb(COM1_LCR, lcr | LCR_DLAB);
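	/* Note: while DLAB is set, the UART's data and interrupt-enable
	 * registers are remapped to the divisor latch bytes (DLL/DLM);
	 * DLAB must be cleared again afterwards to resume normal I/O. */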
	divisor = UART_BASE_FREQ / kinfo.serial_debug_baud;
	if (divisor < 1) divisor = 1;
	if (divisor > 65535) divisor = 65535;
	outb(COM1_DLL, divisor & 0xff);
	outb(COM1_DLM, (divisor >> 8) & 0xff);
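	/* Note: the resulting rate is UART_BASE_FREQ / divisor; with the
	 * conventional 115200 Hz base (1.8432 MHz crystal divided by 16),
	 * a divisor of 1 gives 115200 baud and 12 gives 9600. The clamping
	 * above keeps the divisor within the 16-bit DLL/DLM range. */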