/* system dependent functions for use inside the whole kernel. */
#include "kernel/kernel.h"

#include <machine/cmos.h>
#include <machine/bios.h>
#include <minix/portio.h>
#include <minix/cpufeature.h>
#include <machine/vm.h>
#include <minix/u64.h>

#include "archconst.h"
#include "arch_proto.h"
#include "direct_utils.h"
#include <machine/multiboot.h>

static int osfxsr_feature; /* FXSAVE/FXRSTOR instructions support (SSEx) */

/* set MP and NE flags to handle FPU exceptions in native mode. */
#define CR0_MP_NE	0x0022
/* set CR4.OSFXSR[bit 9] if FXSR is supported. */
#define CR4_OSFXSR	(1L<<9)
/* set OSXMMEXCPT[bit 10] if we provide #XM handler. */
#define CR4_OSXMMEXCPT	(1L<<10)

static void ser_debug(int c);
static void ser_dump_proc_cpu(void);
static void ser_init(void);

void fpu_init(void)
{
	unsigned short cw, sw;

	fninit();
	sw = fnstsw();
	fnstcw(&cw);

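	/* After fninit(), a working x87 leaves the low byte of the status
	 * word at 0 and the control word at its default 0x037F, so masking
	 * the control word with 0x103f yields 0x3f. Any other readback
	 * means no FPU answered the probe. */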
	if((sw & 0xff) == 0 &&
	   (cw & 0x103f) == 0x3f) {
		/* We have some sort of FPU, but don't check the exact model.
		 * Set CR0_NE and CR0_MP to handle FPU exceptions
		 * in native mode. */
		write_cr0(read_cr0() | CR0_MP_NE);
		get_cpulocal_var(fpu_presence) = 1;
		if(_cpufeature(_CPUF_I386_FXSR)) {
			u32_t cr4 = read_cr4() | CR4_OSFXSR; /* Enable FXSR. */
			/* Also set OSXMMEXCPT if SSE is supported; the FXSR
			 * feature can be available without SSE.
			 */
			if(_cpufeature(_CPUF_I386_SSE))
				cr4 |= CR4_OSXMMEXCPT;
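			/* With OSXMMEXCPT clear, an unmasked SSE exception
			 * would raise #UD instead of #XM. */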
			write_cr4(cr4);
			osfxsr_feature = 1;
		} else
			osfxsr_feature = 0;
	} else {
		/* No FPU present. */
		get_cpulocal_var(fpu_presence) = 0;
		osfxsr_feature = 0;
	}
}

void save_local_fpu(struct proc *pr, int retain)
{
	char *state = pr->p_seg.fpu_state;
	/* Save process FPU context. If the 'retain' flag is set, keep the FPU
	 * state as is. If the flag is not set, the state is undefined upon
	 * return, and the caller is responsible for reloading a proper state.
	 */

	if(osfxsr_feature)
		fxsave(state);
	else {
		fnsave(state);
		/* fnsave reinitializes the FPU, so restore the state if the
		 * caller asked us to retain it. */
		if (retain)
			(void) frstor(state);
	}
}

void save_fpu(struct proc *pr)
{
#ifdef CONFIG_SMP
	if (cpuid != pr->p_cpu) {
		int stopped;

		/* remember if the process was already stopped */
		stopped = RTS_ISSET(pr, RTS_PROC_STOP);

		/* stop the remote process and force its context to be saved */
		smp_schedule_stop_proc_save_ctx(pr);

		/*
		 * If the process wasn't stopped, let it run again. The
		 * process is kept blocked by the fact that the kernel cannot
		 * run on its cpu.
		 */
		if (!stopped)
			RTS_UNSET(pr, RTS_PROC_STOP);

		return;
	}
#endif

	if (get_cpulocal_var(fpu_owner) == pr) {
		disable_fpu_exception();
		save_local_fpu(pr, TRUE /*retain*/);
	}
}

/* Reserve a chunk of memory for FPU state; every entry has to be
 * FPUALIGN-aligned.
 */
static char fpu_state[NR_PROCS][FPU_XFP_SIZE] __aligned(FPUALIGN);
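/* Note: FXSAVE stores a 512-byte image and faults unless the target is
 * 16-byte aligned, which is what FPU_XFP_SIZE and FPUALIGN are expected
 * to provide for. */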

void arch_proc_reset(struct proc *pr)
{
	char *v = NULL;

	assert(pr->p_nr < NR_PROCS);

	if(pr->p_nr >= 0) {
		v = fpu_state[pr->p_nr];
		/* verify alignment */
		assert(!((vir_bytes)v % FPUALIGN));
		/* initialize state */
		memset(v, 0, FPU_XFP_SIZE);
	}

	/* Clear process state. */
	memset(&pr->p_reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr))
		pr->p_reg.psw = INIT_TASK_PSW;
	else
		pr->p_reg.psw = INIT_PSW;
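	/* INIT_TASK_PSW presumably differs from INIT_PSW by a raised IOPL,
	 * letting kernel tasks execute I/O instructions directly; user
	 * processes only get the interrupt-enable flag. */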

	pr->p_seg.fpu_state = v;

	/* Initialize the fundamentals that are (initially) the same for all
	 * processes - the segment selectors it gets to use.
	 */
	pr->p_reg.cs = USER_CS_SELECTOR;
	pr->p_reg.gs =
	pr->p_reg.fs =
	pr->p_reg.ss =
	pr->p_reg.es =
	pr->p_reg.ds = USER_DS_SELECTOR;
}

void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.bx = val;
}

int restore_fpu(struct proc *pr)
{
	int failed;
	char *state = pr->p_seg.fpu_state;

	if(!proc_used_fpu(pr)) {
		fninit();
		pr->p_misc_flags |= MF_FPU_INITIALIZED;
	} else {
		if(osfxsr_feature)
			failed = fxrstor(state);
		else
			failed = frstor(state);

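		/* Both restore paths are assumed to report failure when the
		 * CPU rejects the saved image, e.g. state corrupted through
		 * sigreturn. */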
		if (failed) return EINVAL;
	}

	return OK;
}

void cpu_identify(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	eax = 0;
	_cpuid(&eax, &ebx, &ecx, &edx);

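	/* Leaf 0 returns the vendor string in EBX:EDX:ECX; the constants
	 * below match the "GenuineIntel" and "AuthenticAMD" signatures. */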
	if (ebx == INTEL_CPUID_GEN_EBX && ecx == INTEL_CPUID_GEN_ECX &&
			edx == INTEL_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_INTEL;
	} else if (ebx == AMD_CPUID_GEN_EBX && ecx == AMD_CPUID_GEN_ECX &&
			edx == AMD_CPUID_GEN_EDX) {
		cpu_info[cpu].vendor = CPU_VENDOR_AMD;
	} else
		cpu_info[cpu].vendor = CPU_VENDOR_UNKNOWN;

	eax = 1;
	_cpuid(&eax, &ebx, &ecx, &edx);

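	/* Leaf 1 packs the CPU signature into EAX: stepping in bits 3:0,
	 * model in 7:4, family in 11:8, extended model in 19:16 and
	 * extended family in 27:20; the extended fields only apply to the
	 * boundary encodings tested below. */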
	cpu_info[cpu].family = (eax >> 8) & 0xf;
	if (cpu_info[cpu].family == 0xf)
		cpu_info[cpu].family += (eax >> 20) & 0xff;
	cpu_info[cpu].model = (eax >> 4) & 0xf;
	if (cpu_info[cpu].model == 0xf || cpu_info[cpu].model == 0x6)
		cpu_info[cpu].model += ((eax >> 16) & 0xf) << 4;
	cpu_info[cpu].stepping = eax & 0xf;
	cpu_info[cpu].flags[0] = ecx;
	cpu_info[cpu].flags[1] = edx;
}

void arch_init(void)
{
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#endif

#if defined(USE_APIC) && !defined(CONFIG_SMP)
	if (config_no_apic) {
		BOOT_VERBOSE(printf("APIC disabled, using legacy PIC\n"));
	}
	else if (!apic_single_cpu_init()) {
		BOOT_VERBOSE(printf("APIC not present, using legacy PIC\n"));
	}
#endif

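	/* cut_memmap() is understood to trim a range out of the boot memory
	 * map, so the memory later handed to VM never overlaps the BIOS
	 * areas reserved below. */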
	/* Reserve some BIOS ranges */
	cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
	cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
}

/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug(void)
{
#if CONFIG_OXPCIE
	int oxin;

	/* poll the OXPCIe debug UART for an incoming character */
	if((oxin = oxpcie_in()) >= 0)
		ser_debug(oxin);
#endif
}

static void ser_dump_queue_cpu(unsigned cpu)
{
	int q;
	struct proc **rdy_head;

	rdy_head = get_cpu_var(cpu, run_q_head);

	for(q = 0; q < NR_SCHED_QUEUES; q++) {
		struct proc *p;

		for(p = rdy_head[q]; p; p = p->p_nextready) {
			printf("%s / %d ", p->p_name, p->p_endpoint);
		}
	}
}

static void ser_dump_queues(void)
{
#ifdef CONFIG_SMP
	int cpu;

	printf("--- run queues ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d :\n", cpu);
		ser_dump_queue_cpu(cpu);
	}
#else
	ser_dump_queue_cpu(0);
#endif
}

#ifdef CONFIG_SMP
static void dump_bkl_usage(void)
{
	unsigned cpu;

	printf("--- BKL usage ---\n");
	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu,
			ex64hi(kernel_ticks[cpu]),
			ex64lo(kernel_ticks[cpu]),
			ex64hi(bkl_ticks[cpu]),
			ex64lo(bkl_ticks[cpu]),
			bkl_succ[cpu], bkl_tries[cpu]);
	}
}

static void reset_bkl_usage(void)
{
	memset(kernel_ticks, 0, sizeof(kernel_ticks));
	memset(bkl_ticks, 0, sizeof(bkl_ticks));
	memset(bkl_tries, 0, sizeof(bkl_tries));
	memset(bkl_succ, 0, sizeof(bkl_succ));
}
#endif /* CONFIG_SMP */

static void ser_debug(const int c)
{
	serial_debug_active = 1;

	switch(c) {
	case 'Q':
		minix_shutdown(NULL);
		NOT_REACHABLE;
#define TOGGLECASE(ch, flag)				\
	case ch: {					\
		if(verboseflags & flag) {		\
			verboseflags &= ~flag;		\
			printf("%s disabled\n", #flag);	\
		} else {				\
			verboseflags |= flag;		\
			printf("%s enabled\n", #flag);	\
		}					\
		break;					\
	}
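	/* Each TOGGLECASE below expands to a complete case label that flips
	 * one flag in verboseflags and reports the new setting. */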
	TOGGLECASE('8', VF_SCHEDULING)
	TOGGLECASE('9', VF_PICKPROC)

#ifdef USE_APIC
	case 'I':
		dump_apic_irq_state();
		break;
#endif
	}

	serial_debug_active = 0;
}

static void ser_dump_proc(void)
{
	struct proc *pp;

	for (pp = BEG_PROC_ADDR; pp < END_PROC_ADDR; pp++)
	{
		if (isemptyp(pp))
			continue;
		print_proc_recursive(pp);
	}
}

#ifdef CONFIG_SMP
static void ser_dump_proc_cpu(void)
{
	struct proc *pp;
	unsigned cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("CPU %d processes : \n", cpu);
		for (pp = BEG_USER_ADDR; pp < END_PROC_ADDR; pp++) {
			if (isemptyp(pp) || pp->p_cpu != cpu)
				continue;
			print_proc(pp);
		}
	}
}
#endif

#endif /* DEBUG_SERIAL */

int arch_init_profile_clock(const u32_t freq)
{
	int r;

	/* Set CMOS timer frequency. */
	outb(RTC_INDEX, RTC_REG_A);
	outb(RTC_IO, RTC_A_DV_OK | freq);
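	/* On the MC146818, the low four bits of register A select the
	 * periodic rate: frequency = 32768 >> (rate - 1), so rate 6 gives
	 * 1024 Hz. RTC_A_DV_OK keeps the divider chain in normal operation. */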
	/* Enable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r | RTC_B_PIE);
	/* Mandatory read of CMOS register to enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);

	return CMOS_CLOCK_IRQ;
}

void arch_stop_profile_clock(void)
{
	int r;

	/* Disable CMOS timer interrupts. */
	outb(RTC_INDEX, RTC_REG_B);
	r = inb(RTC_IO);
	outb(RTC_INDEX, RTC_REG_B);
	outb(RTC_IO, r & ~RTC_B_PIE);
}

void arch_ack_profile_clock(void)
{
	/* Mandatory read of CMOS register to re-enable timer interrupts. */
	outb(RTC_INDEX, RTC_REG_C);
	inb(RTC_IO);
}

void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	proc->p_reg.retreg =
		do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
}

struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif

	/* set pointer to the process to run on the stack */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

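	/* The kernel-entry code is expected to read this word from the top
	 * of the kernel stack to locate the process whose context it must
	 * save on the next trap or interrupt. */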
	/* make sure IF is on in FLAGS so that interrupts won't be disabled
	 * once p's context is restored. this should not be possible.
	 */
	assert(p->p_reg.psw & (1L << 9));

	return p;
}

void fpu_sigcontext(struct proc *pr, struct sigframe *fr, struct sigcontext *sc)
{
	int fp_error;

	if (osfxsr_feature) {
		fp_error = sc->sc_fpu_state.xfp_regs.fp_status &
			~sc->sc_fpu_state.xfp_regs.fp_control;
	} else {
		fp_error = sc->sc_fpu_state.fpu_regs.fp_status &
			~sc->sc_fpu_state.fpu_regs.fp_control;
	}

	if (fp_error & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		fr->sf_code = FPE_FLTINV;
	} else if (fp_error & 0x004) {
		fr->sf_code = FPE_FLTDIV; /* Divide by Zero */
	} else if (fp_error & 0x008) {
		fr->sf_code = FPE_FLTOVF; /* Overflow */
	} else if (fp_error & 0x012) {
		fr->sf_code = FPE_FLTUND; /* Denormal, Underflow */
	} else if (fp_error & 0x020) {
		fr->sf_code = FPE_FLTRES; /* Precision */
	} else {
		fr->sf_code = 0; /* XXX - probably should be used for FPE_INTOVF or
				  * FPE_INTDIV */
	}
}

reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }

static void ser_init(void)
{
	unsigned char lcr;
	unsigned divisor;

	/* keep BIOS settings if cttybaud is not set */
	if (kinfo.serial_debug_baud <= 0) return;

	/* set DLAB to make the baud-rate divisor registers accessible */
	lcr = LCR_8BIT | LCR_1STOP | LCR_NPAR;
	outb(COM1_LCR, lcr | LCR_DLAB);

	/* compute the baud-rate divisor */
	divisor = UART_BASE_FREQ / kinfo.serial_debug_baud;
	if (divisor < 1) divisor = 1;
	if (divisor > 65535) divisor = 65535;

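	/* A 16550-class UART divides UART_BASE_FREQ (115200 on standard PC
	 * hardware, i.e. the 1.8432 MHz input clock pre-divided by 16) by
	 * this divisor: 1 gives 115200 baud, 12 gives 9600. */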
	outb(COM1_DLL, divisor & 0xff);
	outb(COM1_DLM, (divisor >> 8) & 0xff);

	/* clear DLAB again so normal register access is restored */
	outb(COM1_LCR, lcr);
}