/* system dependent functions for use inside the whole kernel. */

#include "kernel/kernel.h"

#include <string.h>
#include <assert.h>

#include <machine/cmos.h>
#include <machine/bios.h>
#include <machine/cpu.h>
#include <minix/portio.h>
#include <minix/cpufeature.h>
#include <machine/vm.h>

#include <minix/u64.h>

#include "archconst.h"
#include "arch_proto.h"
#include "oxpcie.h"

#include <machine/multiboot.h>
static int osfxsr_feature;	/* FXSAVE/FXRSTOR instructions support (SSEx) */
/* set MP and NE flags to handle FPU exceptions in native mode. */
#define CR0_MP_NE	0x0022
/* set CR4.OSFXSR[bit 9] if FXSR is supported. */
#define CR4_OSFXSR	(1L<<9)
/* set OSXMMEXCPT[bit 10] if we provide #XM handler. */
#define CR4_OSXMMEXCPT	(1L<<10)
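
/* Note (background, not in the original header): CR0_MP_NE combines
 * CR0.MP (bit 1, 0x0002) and CR0.NE (bit 5, 0x0020).  With NE set, x87
 * errors raise exception 16 natively instead of going through the legacy
 * external FERR#/IRQ13 path; MP makes WAIT/FWAIT honor CR0.TS so lazy
 * FPU context switching can trap. */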
static void ser_debug(int c);
static void ser_dump_vfs(void);
static void ser_dump_proc_cpu(void);
static void ser_init(void);
void fpu_init(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check exact model.
		 * Set CR0_NE and CR0_MP to handle fpu exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */

			/* OSXMMEXCPT if supported;
			 * the FXSR feature can be available without SSE.
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT;

			write_cr4(cr4);
			osfxsr_feature = 1;
		} else {
			osfxsr_feature = 0;
		}
	} else {
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
		osfxsr_feature = 0;
	}
}
void save_local_fpu(struct proc *pr, int retain)
{
	char *state = pr->p_seg.fpu_state;

	/* Save process FPU context. If the 'retain' flag is set, keep the FPU
	 * state as is. If the flag is not set, the state is undefined upon
	 * return, and the caller is responsible for reloading a proper state.
	 */

	assert(state);

	if(osfxsr_feature) {
		fxsave(state);
	} else {
		fnsave(state);
		if (retain)
			(void) frstor(state);
	}
}
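
/* Note: unlike FXSAVE, FNSAVE reinitializes the FPU as a side effect of
 * storing its state, so when the caller asked to retain the state it has
 * to be reloaded with frstor right after saving. */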
void save_fpu(struct proc *pr)
{
#ifdef CONFIG_SMP
	if (cpuid != pr->p_cpu) {
		int stopped;

		/* remember if the process was already stopped */
		stopped = RTS_ISSET(pr, RTS_PROC_STOP);

		/* stop the remote process and force its context to be saved */
		smp_schedule_stop_proc_save_ctx(pr);

		/*
		 * If the process wasn't stopped, let the process run again. The
		 * process is kept blocked by the fact that the kernel cannot run
		 * on its cpu.
		 */
		if (!stopped)
			RTS_UNSET(pr, RTS_PROC_STOP);

		return;
	}
#endif

	if (get_cpulocal_var(fpu_owner) == pr) {
		disable_fpu_exception();
		save_local_fpu(pr, TRUE /*retain*/);
	}
}
/* reserve a chunk of memory for fpu state; every one has to
 * be FPUALIGN-aligned.
 */
static char fpu_state[NR_PROCS][FPU_XFP_SIZE] __aligned(FPUALIGN);
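
/* Note: FXSAVE/FXRSTOR operate on a 512-byte save area that must be
 * 16-byte aligned; FPUALIGN enforces that for every per-process buffer
 * above, and arch_proc_reset() asserts it before first use. */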
void arch_proc_reset(struct proc *pr)
{
	char *v = NULL;
	struct stackframe_s reg;

	assert(pr->p_nr < NR_PROCS);

	if(pr->p_nr >= 0) {
		v = fpu_state[pr->p_nr];
		/* verify alignment */
		assert(!((vir_bytes)v % FPUALIGN));
		/* initialize state */
		memset(v, 0, FPU_XFP_SIZE);
	}

	/* Clear process state. */
	memset(&reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr))
		reg.psw = INIT_TASK_PSW;
	else
		reg.psw = INIT_PSW;

	pr->p_seg.fpu_state = v;

	/* Initialize the fundamentals that are (initially) the same for all
	 * processes - the segment selectors it gets to use.
	 */
	pr->p_reg.cs = USER_CS_SELECTOR;
	pr->p_reg.gs =
	pr->p_reg.fs =
	pr->p_reg.ss =
	pr->p_reg.es =
	pr->p_reg.ds = USER_DS_SELECTOR;

	/* set full context and make sure it gets restored */
	arch_proc_setcontext(pr, &reg, 0, KTS_FULLCONTEXT);
}
void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.bx = val;
}
int restore_fpu(struct proc *pr)
{
	int failed;
	char *state = pr->p_seg.fpu_state;

	assert(state);

	if(!proc_used_fpu(pr)) {
		fninit();
		pr->p_misc_flags |= MF_FPU_INITIALIZED;
	} else {
		if(osfxsr_feature) {
			failed = fxrstor(state);
		} else {
			failed = frstor(state);
		}

		if (failed) return EINVAL;
	}

	return OK;
}
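
/* Note: frstor/fxrstor can fault when the saved image is malformed
 * (e.g. a process handing back a corrupted FPU frame via sigreturn);
 * reporting EINVAL lets the caller fail the offending process instead
 * of taking the exception inside the kernel. */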
void cpu_identify(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	eax = 0;
	_cpuid(&eax, &ebx, &ecx, &edx);

	if (ebx == INTEL_CPUID_GEN_EBX && ecx == INTEL_CPUID_GEN_ECX &&
			edx == INTEL_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_INTEL;
	} else if (ebx == AMD_CPUID_GEN_EBX && ecx == AMD_CPUID_GEN_ECX &&
			edx == AMD_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_AMD;
	} else
		cpu_info[cpu].vendor = CPU_VENDOR_UNKNOWN;

	if (eax == 0)
		return;

	eax = 1;
	_cpuid(&eax, &ebx, &ecx, &edx);

	cpu_info[cpu].family = (eax >> 8) & 0xf;
	if (cpu_info[cpu].family == 0xf)
		cpu_info[cpu].family += (eax >> 20) & 0xff;
	cpu_info[cpu].model = (eax >> 4) & 0xf;
	if (cpu_info[cpu].model == 0xf || cpu_info[cpu].model == 0x6)
		cpu_info[cpu].model += ((eax >> 16) & 0xf) << 4;
	cpu_info[cpu].stepping = eax & 0xf;
	cpu_info[cpu].flags[0] = ecx;
	cpu_info[cpu].flags[1] = edx;
}
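
/* Note: CPUID leaf 1 packs the signature into EAX as stepping [3:0],
 * model [7:4], family [11:8], extended model [19:16] and extended
 * family [27:20].  For example, EAX = 0x00000f29 decodes under the
 * rules above to family 0xf (extended family 0), model 0x2, stepping 9. */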
void arch_init(void)
{
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#endif

	ser_init();

#if defined(USE_APIC) && !defined(CONFIG_SMP)
	if (config_no_apic) {
		BOOT_VERBOSE(printf("APIC disabled, using legacy PIC\n"));
	}
	else if (!apic_single_cpu_init()) {
		BOOT_VERBOSE(printf("APIC not present, using legacy PIC\n"));
	}
#endif

	/* Reserve some BIOS ranges */
	cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
	cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
}
#if DEBUG_SERIAL

/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug(void)
{
	int oxin;

	if((oxin = oxpcie_in()) >= 0)
		ser_debug(oxin);
}
static void ser_dump_queue_cpu(unsigned cpu)
{
	int q;
	struct proc ** rdy_head;

	rdy_head = get_cpu_var(cpu, run_q_head);

	for(q = 0; q < NR_SCHED_QUEUES; q++) {
		struct proc *p;
		for(p = rdy_head[q]; p; p = p->p_nextready) {
			printf("%s / %d ", p->p_name, p->p_endpoint);
		}
		printf("\n");
	}
}
static void ser_dump_queues(void)
{
#ifdef CONFIG_SMP
	unsigned cpu;

	printf("--- run queues ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d :\n", cpu);
		ser_dump_queue_cpu(cpu);
	}
#else
	ser_dump_queue_cpu(0);
#endif
}
#ifdef CONFIG_SMP
static void dump_bkl_usage(void)
{
	unsigned cpu;

	printf("--- BKL usage ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n",
			cpu,
			ex64hi(kernel_ticks[cpu]),
			ex64lo(kernel_ticks[cpu]),
			ex64hi(bkl_ticks[cpu]),
			ex64lo(bkl_ticks[cpu]),
			bkl_succ[cpu], bkl_tries[cpu]);
	}
}
static void reset_bkl_usage(void)
{
	memset(kernel_ticks, 0, sizeof(kernel_ticks));
	memset(bkl_ticks, 0, sizeof(bkl_ticks));
	memset(bkl_tries, 0, sizeof(bkl_tries));
	memset(bkl_succ, 0, sizeof(bkl_succ));
}
#endif /* CONFIG_SMP */
static void ser_debug(const int c)
{
	serial_debug_active = 1;

	switch(c)
	{
	case 'Q':
		minix_shutdown(NULL);
		NOT_REACHABLE;
#if DEBUG_TRACE
#define TOGGLECASE(ch, flag)				\
	case ch: {					\
		if(verboseflags & flag) {		\
			verboseflags &= ~flag;		\
			printf("%s disabled\n", #flag);	\
		} else {				\
			verboseflags |= flag;		\
			printf("%s enabled\n", #flag);	\
		}					\
		break;					\
	}
	TOGGLECASE('8', VF_SCHEDULING)
	TOGGLECASE('9', VF_PICKPROC)
#endif
#ifdef USE_APIC
	case 'I':
		dump_apic_irq_state();
		break;
#endif
	}
	serial_debug_active = 0;
}
static void ser_dump_vfs(void)
{
	/* Notify VFS it has to generate stack traces. Kernel can't do that as
	 * it's not aware of user space threads.
	 */
	mini_notify(proc_addr(KERNEL), VFS_PROC_NR);
}
static void ser_dump_proc_cpu(void)
{
	struct proc *pp;
	unsigned cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d processes : \n", cpu);
		for (pp = BEG_USER_ADDR; pp < END_PROC_ADDR; pp++) {
			if (isemptyp(pp) || pp->p_cpu != cpu)
				continue;
			print_proc(pp);
		}
	}
}

#endif /* DEBUG_SERIAL */
int arch_init_profile_clock(const u32_t freq)
{
	int r;

	/* Set CMOS timer frequency. */
	outb(RTC_INDEX, RTC_REG_A);
	outb(RTC_IO, RTC_A_DV_OK | freq);
	/* Enable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r | RTC_B_PIE);
	/* Mandatory read of CMOS register to enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);

	return CMOS_CLOCK_IRQ;
}
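
/* Note: on the MC146818-compatible RTC, the low four bits of register A
 * select the periodic interrupt rate (rate N gives 32768 >> (N-1) Hz),
 * so 'freq' here is the rate-select value OR'ed into the register, not
 * a frequency in Hz. */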
void arch_stop_profile_clock(void)
{
	int r;

	/* Disable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r & ~RTC_B_PIE);
}
void arch_ack_profile_clock(void)
{
	/* Mandatory read of CMOS register to re-enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);
}
void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	assert(proc->p_misc_flags & MF_SC_DEFER);
	proc->p_reg.retreg =
		do_ipc(proc->p_defer.r1, proc->p_defer.r2, proc->p_defer.r3);
}
struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

	/* make sure IF is on in FLAGS so that interrupts won't be disabled
	 * once p's context is restored.
	 */
	p->p_reg.psw |= IF_MASK;

	/* Set TRACEBIT state properly. */
	if(p->p_misc_flags & MF_STEP)
		p->p_reg.psw |= TRACEBIT;
	else
		p->p_reg.psw &= ~TRACEBIT;

	return p;
}
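
/* Note: the proc pointer is written at the base of the CPU's kernel
 * stack (where tss sp0 points) so that the low-level trap entry code
 * can locate the current process without any further lookup. */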
void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trap_style)
{
	if(isuser) {
		/* Restore user bits of psw from sc, maintain system bits
		 * from proc. */
		state->psw = (state->psw & X86_FLAGS_USER) |
			(p->p_reg.psw & ~X86_FLAGS_USER);
	}

	/* someone wants to totally re-initialize process state */
	assert(sizeof(p->p_reg) == sizeof(*state));
	if(state != &p->p_reg) {
		memcpy(&p->p_reg, state, sizeof(*state));
	}

	/* further code is instructed to not touch the context
	 * any more.
	 */
	p->p_misc_flags |= MF_CONTEXT_SET;

	/* on x86 this requires returning using iret (KTS_INT)
	 * so that the full context is restored instead of relying on
	 * the userspace doing it (as it would do on SYSEXIT).
	 * as ESP and EIP are also reset, userspace won't try to
	 * restore bogus context after returning.
	 *
	 * if the process is not blocked, or the kernel will ignore
	 * our trap style, we needn't panic but things will probably
	 * not go well for the process (restored context will be ignored)
	 * and the situation should be debugged.
	 */
	if(!(p->p_rts_flags)) {
		printf("WARNING: setting full context of runnable process\n");
	}
	if(p->p_seg.p_kern_trap_style == KTS_NONE)
		printf("WARNING: setting full context of out-of-kernel process\n");
	p->p_seg.p_kern_trap_style = trap_style;
}
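
/* Note: filtering through X86_FLAGS_USER means a caller can only
 * influence the user-modifiable EFLAGS bits (arithmetic flags, DF and
 * the like); system bits such as IOPL and IF are kept from the kernel's
 * own copy, so a forged signal context cannot elevate privileges. */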
void restore_user_context(struct proc *p)
{
	int trap_style = p->p_seg.p_kern_trap_style;

#define TYPES 10
	static int restores[TYPES], n = 0;

	if(trap_style >= 0 && trap_style < TYPES)
		restores[trap_style]++;

	if(!(n++ % 500000)) {
		int t;
		for(t = 0; t < TYPES; t++)
			printf("%d: %d ", t, restores[t]);
		printf("\n");
	}

	p->p_seg.p_kern_trap_style = KTS_NONE;

	if(trap_style == KTS_SYSENTER) {
		restore_user_context_sysenter(p);
		NOT_REACHABLE;
	}

	if(trap_style == KTS_SYSCALL) {
		restore_user_context_syscall(p);
		NOT_REACHABLE;
	}

	switch(trap_style) {
		case KTS_NONE:
			panic("no entry trap style known");
		case KTS_FULLCONTEXT:
			restore_user_context_int(p);
			NOT_REACHABLE;
		default:
			panic("unknown trap style recorded");
	}
}
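
/* Note: the exit path has to mirror the recorded entry path.  SYSEXIT
 * and SYSRET restore only a partial frame and leave the rest to
 * userspace convention, whereas the iret path used for KTS_FULLCONTEXT
 * reloads the complete stackframe; that is why a freshly set context
 * demands an interrupt-style return. */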
void fpu_sigcontext(struct proc *pr, struct sigframe_sigcontext *fr,
	struct sigcontext *sc)
{
	int fp_error;

	if (osfxsr_feature) {
		fp_error = sc->sc_fpu_state.xfp_regs.fp_status &
			~sc->sc_fpu_state.xfp_regs.fp_control;
	} else {
		fp_error = sc->sc_fpu_state.fpu_regs.fp_status &
			~sc->sc_fpu_state.fpu_regs.fp_control;
	}

	if (fp_error & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		fr->sf_code = FPE_FLTINV;
	} else if (fp_error & 0x004) {
		fr->sf_code = FPE_FLTDIV; /* Divide by Zero */
	} else if (fp_error & 0x008) {
		fr->sf_code = FPE_FLTOVF; /* Overflow */
	} else if (fp_error & 0x012) {
		fr->sf_code = FPE_FLTUND; /* Denormal, Underflow */
	} else if (fp_error & 0x020) {
		fr->sf_code = FPE_FLTRES; /* Precision */
	} else {
		fr->sf_code = 0; /* XXX - probably should be used for FPE_INTOVF or
				  * FPE_INTDIV */
	}
}
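
/* Note: the low bits of the x87 control word mask the individual
 * exceptions, so status & ~control isolates exactly the exception flags
 * that were raised while unmasked, i.e. the condition that actually
 * triggered the trap being translated into a signal code here. */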
reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }
static void ser_init(void)
{
	unsigned char lcr;
	unsigned divisor;

	/* keep BIOS settings if cttybaud is not set */
	if (kinfo.serial_debug_baud <= 0) return;

	/* set DLAB to make baud accessible */
	lcr = LCR_8BIT | LCR_1STOP | LCR_NPAR;
	outb(COM1_LCR, lcr | LCR_DLAB);

	/* set baud rate */
	divisor = UART_BASE_FREQ / kinfo.serial_debug_baud;
	if (divisor < 1) divisor = 1;
	if (divisor > 65535) divisor = 65535;

	outb(COM1_DLL, divisor & 0xff);
	outb(COM1_DLM, (divisor >> 8) & 0xff);

	/* clear DLAB again so the data registers are accessible */
	outb(COM1_LCR, lcr);
}
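
/* Note: assuming the conventional PC UART base of 115200 (a 1.8432 MHz
 * crystal divided by 16), a serial_debug_baud of 115200 yields divisor 1
 * and 9600 yields divisor 12. */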