#include <aros/debug.h>
#include <asm/segments.h>
#include <aros/symbolsets.h>
#include <exec/lists.h>
#include <utility/tagitem.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_intern.h"
#include "../bootstrap/multiboot.h"
#include LC_LIBDEFS_FILE
extern const unsigned char start64[];
extern const unsigned char _binary_smpbootstrap_start[];
extern const unsigned char _binary_smpbootstrap_size[];
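/*
 * Note: the _binary_smpbootstrap_* symbols appear to follow the naming scheme
 * used by objcopy/ld when a raw binary blob (here: the SMP AP bootstrap code)
 * is linked into the image. The "size" symbol carries the blob length in its
 * address, which is why it is declared as an array and cast where it is used.
 */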
29 asm(".section .aros.init,\"ax\"\n\t"
31 ".type start64,@function\n"
32 "start64: movq tmp_stack_end(%rip),%rsp\n\t"
34 "call __clear_bss\n\t"
36 "movq stack_end(%rip), %rsp\n\t"
37 "movq target_address(%rip), %rsi\n\t"
39 ".string \"Native/CORE v3 (" __DATE__
")\""
void __clear_bss(struct TagItem *msg)
{
    struct KernelBSS *bss;
    bss = (struct KernelBSS *)krnGetTagData(KRN_KernelBss, 0, msg);

    bzero(bss->addr, bss->len);
#define KERNBOOTFLAG_BOOTCPUSET 1

IPTR  _kern_early_ACPIRSDP;
UBYTE _kern_early_BOOTAPICID;
IPTR  _Kern_APICTrampolineBase;

static char _kern_early_BOOTCmdLine[200];
static int Kernel_Init(LIBBASETYPEPTR LIBBASE)
{
    int i;

    TLS_SET(KernelBase, LIBBASE);
    struct ExecBase *SysBase = TLS_GET(SysBase);

    LIBBASE->kb_XTPIC_Mask = 0xfffb;
    for (i = 0; i < 256; i++)
    {
        NEWLIST(&LIBBASE->kb_Intr[i]);

        LIBBASE->kb_Intr[i].lh_Type = KBL_XTPIC;

        LIBBASE->kb_Intr[i].lh_Type = KBL_APIC;

        LIBBASE->kb_Intr[i].lh_Type = KBL_INTERNAL;
    D(bug("[Kernel] Kernel_Init: Post-exec init\n"));

    if (LIBBASE->kb_APICBase == NULL)
        LIBBASE->kb_APICBase = core_APICGetMSRAPICBase();
    D(bug("[Kernel] Kernel_Init: APIC Base @ %012p\n", LIBBASE->kb_APICBase));

    core_APICInitialise(LIBBASE->kb_APICBase);
#warning "TODO: Check if NOACPI is set on the boot command line"
    if (_kern_early_ACPIRSDP)
    {
        LIBBASE->kb_ACPIRSDP = _kern_early_ACPIRSDP;
        LIBBASE->kb_APICCount = 1;
        LIBBASE->kb_APICIDMap = AllocVec(LIBBASE->kb_APICCount, MEMF_CLEAR);
        LIBBASE->kb_APICIDMap[0] = _kern_early_BOOTAPICID;

        core_ACPIInitialise();
    }
    uint32_t *localAPIC = (uint32_t *)(LIBBASE->kb_APICBase + 0x320);

    LIBBASE->kb_MemPool = CreatePool(MEMF_CLEAR | MEMF_PUBLIC, 8192, 4096);
    D(bug("[Kernel] Kernel_Init: MemPool @ %012p\n", LIBBASE->kb_MemPool));
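    /*
     * The debug dump below peeks directly at the memory-mapped xAPIC
     * registers. Offsets used (per the Intel SDM): 0x80 TPR, 0xB0 EOI,
     * 0xF0 spurious interrupt vector, 0x280 ESR, 0x300/0x310 ICR low/high,
     * 0x320 LVT timer, 0x380 initial count, 0x390 current count,
     * 0x3E0 divide configuration.
     */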
    /* Acknowledge any outstanding interrupt by writing the EOI register */
    asm volatile ("movl %0,(%1)"::"r"(0),"r"((volatile uint32_t*)(LIBBASE->kb_APICBase + 0xb0)));

    D(bug("[Kernel] Kernel_Init: APIC SVR=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0xf0)));
    D(bug("[Kernel] Kernel_Init: APIC ESR=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x280)));
    D(bug("[Kernel] Kernel_Init: APIC TPR=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x80)));
    D(bug("[Kernel] Kernel_Init: APIC ICR=%08x%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x314), *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x310)));
    D(bug("[Kernel] Kernel_Init: APIC Timer divide=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x3e0)));
    D(bug("[Kernel] Kernel_Init: APIC Timer config=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x320)));
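    /*
     * LVT timer values written below: 0x000000fe = vector 0xfe, unmasked,
     * one-shot mode; 0x000200fe = the same vector with bit 17 set, i.e.
     * periodic mode. This block appears to be only a crude boot-time check
     * that the local APIC timer actually counts.
     */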
    asm volatile ("movl %0,(%1)"::"r"(0x000000fe),"r"((volatile uint32_t*)(LIBBASE->kb_APICBase + 0x320)));
    //*(volatile uint32_t *)localAPIC = 0x000000fe;
    D(bug("[Kernel] Kernel_Init: APIC Timer config=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x320)));

    D(bug("[Kernel] Kernel_Init: APIC Initial count=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x380)));
    D(bug("[Kernel] Kernel_Init: APIC Current count=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x390)));
    *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x380) = 0x11111111;
    asm volatile ("movl %0,(%1)"::"r"(0x000200fe),"r"((volatile uint32_t*)(LIBBASE->kb_APICBase + 0x320)));
    D(bug("[Kernel] Kernel_Init: APIC Timer config=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x320)));

    for (i=0; i < 0x10000000; i++) asm volatile("nop;");

    D(bug("[Kernel] Kernel_Init: APIC Initial count=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x380)));
    D(bug("[Kernel] Kernel_Init: APIC Current count=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x390)));
    for (i=0; i < 0x1000000; i++) asm volatile("nop;");
    D(bug("[Kernel] Kernel_Init: APIC Initial count=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x380)));
    D(bug("[Kernel] Kernel_Init: APIC Current count=%08x\n", *(volatile uint32_t*)(LIBBASE->kb_APICBase + 0x390)));
    /* for (i=0; i < 0x1000000; i++) asm volatile("nop;"); */
ADD2INITLIB(Kernel_Init, 0)
static struct TagItem *BootMsg;
static struct vbe_controller vbectrl;
static struct vbe_mode vbemd;
int kernel_cstart(struct TagItem *msg, void *entry)
{
    rkprintf("[Kernel] kernel_cstart: Jumped into kernel.resource @ %p [asm stub @ %p].\n", kernel_cstart, start64);

    IPTR _APICBase = core_APICGetMSRAPICBase();
    UBYTE kern_apic_id = core_APICGetID(_APICBase);
    rkprintf("[Kernel] kernel_cstart: launching on APIC ID %d, base @ %p\n", kern_apic_id, _APICBase);
    /* Enable fxsave/fxrstor */
    wrcr(cr4, rdcr(cr4) | _CR4_OSFXSR | _CR4_OSXMMEXCPT);

    if (!(_kern_initflags & KERNBOOTFLAG_BOOTCPUSET))
    {
        _kern_early_BOOTAPICID = kern_apic_id;
        _kern_initflags |= KERNBOOTFLAG_BOOTCPUSET;
    }
    struct TagItem *tag = krnFindTagItem(KRN_CmdLine, msg);
    addr = krnGetTagData(KRN_KernelBase, 0, msg);
    len = krnGetTagData(KRN_KernelHighest, 0, msg) - addr;
    if (tag && tag->ti_Data != (IPTR)_kern_early_BOOTCmdLine)
    {
        strncpy(_kern_early_BOOTCmdLine, (const char *)tag->ti_Data, sizeof(_kern_early_BOOTCmdLine) - 1);
        tag->ti_Data = (IPTR)_kern_early_BOOTCmdLine;
    }
    tag = krnFindTagItem(KRN_VBEModeInfo, msg);

    if (tag && tag->ti_Data != (IPTR)&vbemd)
    {
        bcopy((const void *)tag->ti_Data, &vbemd, sizeof(vbemd));
        tag->ti_Data = (IPTR)&vbemd;
    }
    tag = krnFindTagItem(KRN_VBEControllerInfo, msg);

    if (tag && tag->ti_Data != (IPTR)&vbectrl)
    {
        bcopy((const void *)tag->ti_Data, &vbectrl, sizeof(vbectrl));
        tag->ti_Data = (IPTR)&vbectrl;
    }
    /* Initialize the ACPI boot-time table parser. */
    _kern_early_ACPIRSDP = core_ACPIProbe();
    rkprintf("[Kernel] kernel_cstart: core_ACPIProbe() returned %p\n", _kern_early_ACPIRSDP);
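    /*
     * The SMP trampoline holds the 16-bit startup code executed by the
     * application processors. A SIPI startup vector can only address a
     * 4KB-aligned page in the first megabyte of RAM, which is why a page is
     * carved out of the low memory area and masked with 0xFF000 below.
     */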
#warning "TODO: Allocate Trampoline page better"

    IPTR lowpages = (krnGetTagData(KRN_MEMLower, 0, msg) * 1024);
    if ((lowpages > 0x2000) && ((lowpages - 0x2000) > PAGE_SIZE))
    {
        _Kern_APICTrampolineBase = (lowpages - PAGE_SIZE) & 0xFF000;
        lowpages = (_Kern_APICTrampolineBase - 1) / 1024;

        krnSetTagData(KRN_MEMLower, lowpages, msg);
        rkprintf("[Kernel] kernel_cstart: Allocated %d bytes for APIC Trampoline @ %p\n", PAGE_SIZE, _Kern_APICTrampolineBase);

#if defined(CONFIG_LAPICS)
        memcpy((void *)_Kern_APICTrampolineBase, _binary_smpbootstrap_start,
               (unsigned long)_binary_smpbootstrap_size);

        rkprintf("[Kernel] kernel_cstart: Copied APIC bootstrap code to Trampoline from %p, %d bytes\n", _binary_smpbootstrap_start,
                 (unsigned long)_binary_smpbootstrap_size);
    /* Set TSS, GDT, LDT and MMU up */
    core_CPUSetup(_APICBase);

    core_ProtKernelArea(addr, len, 1, 0, 1);

    core_ProtKernelArea(0, 1, 0, 0, 0);

    core_CPUSetup(_APICBase);
    rkprintf("[Kernel] kernel_cstart[%d]: APIC_BASE_MSR=%016p\n", kern_apic_id, rdmsrq(27)); /* MSR 0x1B = IA32_APIC_BASE */

    if (kern_apic_id == _kern_early_BOOTAPICID)
    {
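        /*
         * Classic 8259A PIC initialization: ICW1 (0x11) starts the sequence
         * on both controllers, ICW2 remaps IRQ 0-7 to vectors 0x20-0x27 and
         * IRQ 8-15 to 0x28-0x2f (clear of the CPU exception range), ICW3
         * wires the slave to the master's IRQ2 line, and ICW4 selects 8086
         * mode. Finally all interrupt lines are masked off for now.
         */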
256 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x11),"i"(0x20)); /* Initialization sequence for 8259A-1 */
257 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x11),"i"(0xa0)); /* Initialization sequence for 8259A-2 */
258 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x20),"i"(0x21)); /* IRQs at 0x20 - 0x27 */
259 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x28),"i"(0xa1)); /* IRQs at 0x28 - 0x2f */
260 asm("outb %b0,%b1\n\tcall delay": :"a"((char)0x04),"i"(0x21)); /* 8259A-1 is master */
261 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x02),"i"(0xa1)); /* 8259A-2 is slave */
262 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x01),"i"(0x21)); /* 8086 mode for both */
263 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0x01),"i"(0xa1));
264 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0xff),"i"(0x21)); /* Enable cascade int */
265 asm("outb %b0,%b1\n\tcall delay"::"a"((char)0xff),"i"(0xa1)); /* Mask all interrupts */
        rkprintf("[Kernel] kernel_cstart: Interrupts redirected. We will go back in a minute ;)\n");
        rkprintf("[Kernel] kernel_cstart: Booting exec.library\n\n");

        return exec_main(msg, entry);
    }
    /* A temporary solution - the code for SMP is not ready yet... */
#warning "TODO: launch idle task ..."
    rkprintf("[Kernel] kernel_cstart[%d]: Going into endless loop...\n", kern_apic_id);
    while (1) asm volatile("hlt");
/* Small delay routine used by exec_cinit initializer. 0x00eb encodes
   "jmp $+2", the classic short I/O settle delay. */
asm("\ndelay:\t.short 0x00eb\n\tretq");
static uint64_t __attribute__((used)) tmp_stack[128] = {01,};
static const uint64_t *tmp_stack_end __attribute__((used, section(".text"))) = &tmp_stack[120];
static uint64_t stack[STACK_SIZE] __attribute__((used));
static uint64_t stack_panic[STACK_SIZE] __attribute__((used));
static uint64_t stack_super[STACK_SIZE] __attribute__((used));
static uint64_t stack_ring1[STACK_SIZE] __attribute__((used));

static const uint64_t *stack_end __attribute__((used, section(".text"))) = &stack[STACK_SIZE-16];
static const void *target_address __attribute__((section(".text"),used)) = (void*)kernel_cstart;
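/*
 * tmp_stack_end, stack_end and target_address are forced into .text, most
 * likely so that the start64 stub can load them with %rip-relative accesses
 * before __clear_bss has run and before any other data section is touched.
 */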
static struct int_gate_64bit IGATES[256] __attribute__((used,aligned(256)));
static struct tss_64bit TSS[16] __attribute__((used,aligned(128)));
static struct {
    struct segment_desc seg0;      /* seg 0x00 */
    struct segment_desc super_cs;  /* seg 0x08 */
    struct segment_desc super_ds;  /* seg 0x10 */
    struct segment_desc user_cs32; /* seg 0x18 */
    struct segment_desc user_ds;   /* seg 0x20 */
    struct segment_desc user_cs;   /* seg 0x28 */
    struct segment_desc gs;        /* seg 0x30 */
    struct segment_desc ldt;       /* seg 0x38 */
    struct {
        struct segment_desc tss_low;   /* seg 0x40... */
        struct segment_ext  tss_high;
    } tss[16];
} GDT __attribute__((used,aligned(128)));
/* lgdt pseudo-descriptor: 16-bit limit followed by 64-bit linear base */
static struct {
    uint16_t size __attribute__((packed));
    uint64_t base __attribute__((packed));
}
GDT_sel = {sizeof(GDT)-1, (uint64_t)&GDT};
static tls_t system_tls;
    /* Supervisor segments */
    GDT.super_cs.type=0x1a;     /* code segment */
    GDT.super_cs.dpl=0;         /* supervisor level */
    GDT.super_cs.p=1;           /* present */
    GDT.super_cs.l=1;           /* long (64-bit) one */
    GDT.super_cs.d=0;           /* must be zero */
    GDT.super_cs.limit_low=0xffff;
    GDT.super_cs.limit_high=0xf;

    GDT.super_ds.type=0x12;     /* data segment */
    GDT.super_ds.p=1;           /* present */
    GDT.super_ds.limit_low=0xffff;
    GDT.super_ds.limit_high=0xf;
    /* User mode segments */
    GDT.user_cs.type=0x1a;      /* code segment */
    GDT.user_cs.dpl=3;          /* user level */
    GDT.user_cs.p=1;            /* present */
    GDT.user_cs.l=1;            /* long mode */
    GDT.user_cs.d=0;            /* must be zero */
    GDT.user_cs.limit_low=0xffff;
    GDT.user_cs.limit_high=0xf;

    GDT.user_cs32.type=0x1a;    /* code segment for legacy 32-bit code. NOT USED YET! */
    GDT.user_cs32.dpl=3;        /* user level */
    GDT.user_cs32.p=1;          /* present */
    GDT.user_cs32.l=0;          /* 32-bit mode */
    GDT.user_cs32.d=1;          /* 32-bit code */
    GDT.user_cs32.limit_low=0xffff;
    GDT.user_cs32.limit_high=0xf;

    GDT.user_ds.type=0x12;      /* data segment */
    GDT.user_ds.dpl=3;          /* user level */
    GDT.user_ds.p=1;            /* present */
    GDT.user_ds.limit_low=0xffff;
    GDT.user_ds.limit_high=0xf;
    for (i=0; i < 16; i++)
    {
        /* Task State Segment */
        GDT.tss[i].tss_low.type=0x09;                                   /* 64-bit TSS */
        GDT.tss[i].tss_low.limit_low=sizeof(TSS)-1;
        GDT.tss[i].tss_low.base_low=((unsigned int)&TSS[i]) & 0xffff;
        GDT.tss[i].tss_low.base_mid=(((unsigned int)&TSS[i]) >> 16) & 0xff;
        GDT.tss[i].tss_low.dpl=3;                                       /* User mode task */
        GDT.tss[i].tss_low.p=1;                                         /* present */
        GDT.tss[i].tss_low.limit_high=((sizeof(TSS)-1) >> 16) & 0x0f;
        GDT.tss[i].tss_low.base_high=(((unsigned int)&TSS[i]) >> 24) & 0xff;
        GDT.tss[i].tss_high.base_ext = 0;                               /* is within 4GB :-D */
    }
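    /*
     * Note: in long mode a TSS descriptor is 16 bytes and therefore occupies
     * two consecutive GDT slots (tss_low + tss_high above), which is why
     * core_CPUSetup() computes the task register selector as
     * TASK_SEG + (CPU_ID << 4).
     */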
    intptr_t tls_ptr = (intptr_t)&system_tls;

    GDT.gs.type=0x12;           /* data segment */
    GDT.gs.dpl=3;               /* user level */
    GDT.gs.p=1;                 /* present */
    GDT.gs.base_low = tls_ptr & 0xffff;
    GDT.gs.base_mid = (tls_ptr >> 16) & 0xff;
    GDT.gs.base_high = (tls_ptr >> 24) & 0xff;
void core_CPUSetup(IPTR _APICBase)
{
    UBYTE CPU_ID = core_APICGetID(_APICBase);
    rkprintf("[Kernel] core_CPUSetup(id:%d)\n", CPU_ID);

    // system_tls.SysBase = (struct ExecBase *)0x12345678;

    /* Set the exception (IST1), ring 0 and ring 1 stack pointers in this CPU's TSS */
    TSS[CPU_ID].ist1 = (uint64_t)&stack_panic[STACK_SIZE-2];
    TSS[CPU_ID].rsp0 = (uint64_t)&stack_super[STACK_SIZE-2];
    TSS[CPU_ID].rsp1 = (uint64_t)&stack_ring1[STACK_SIZE-2];

    rkprintf("[Kernel] core_CPUSetup[%d]: Reloading the GDT and Task Register\n", CPU_ID);
    asm volatile ("lgdt %0"::"m"(GDT_sel));
    asm volatile ("ltr %w0"::"r"(TASK_SEG + (CPU_ID << 4)));
    asm volatile ("mov %0,%%gs"::"a"(SEG_GS));
}
struct TagItem *krnNextTagItem(const struct TagItem **tagListPtr)
{
    if (!(*tagListPtr)) return 0;

    while(1)
    {
        switch((*tagListPtr)->ti_Tag)
        {
            case TAG_MORE:
                if (!((*tagListPtr) = (struct TagItem *)(*tagListPtr)->ti_Data))
                    return NULL;
                continue;

            case TAG_IGNORE:
                break;

            case TAG_SKIP:
                (*tagListPtr) += (*tagListPtr)->ti_Data + 1;
                continue;

            case TAG_DONE:
                return NULL;

            default:
                return (struct TagItem *)(*tagListPtr)++;
        }

        (*tagListPtr)++;
    }
}
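/*
 * Typical usage (sketch, not from the original source): walk the boot taglist
 * that was handed to kernel_cstart() and dump every item.
 *
 *     const struct TagItem *walk = msg;
 *     struct TagItem *tag;
 *
 *     while ((tag = krnNextTagItem(&walk)))
 *         rkprintf("tag %p = %p\n", (void *)tag->ti_Tag, (void *)tag->ti_Data);
 */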
struct TagItem *krnFindTagItem(Tag tagValue, const struct TagItem *tagList)
{
    struct TagItem *tag;
    const struct TagItem *tagptr = tagList;

    while((tag = krnNextTagItem(&tagptr)))
    {
        if (tag->ti_Tag == tagValue)
            return tag;
    }

    return 0;
}
IPTR krnGetTagData(Tag tagValue, intptr_t defaultVal, const struct TagItem *tagList)
{
    struct TagItem *ti = 0;

    if (tagList && (ti = krnFindTagItem(tagValue, tagList)))
        return ti->ti_Data;

    return defaultVal;
}
void krnSetTagData(Tag tagValue, intptr_t newtagValue, const struct TagItem *tagList)
{
    struct TagItem *ti = 0;

    if (tagList && (ti = krnFindTagItem(tagValue, tagList)))
        ti->ti_Data = newtagValue;
}
AROS_LH0I(struct TagItem *, KrnGetBootInfo,
          struct KernelBase *, KernelBase, 10, Kernel)