/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <aros/multiboot.h>
#include <asm/cpu.h>
#include <asm/io.h>
#include <aros/symbolsets.h>
#include <exec/lists.h>
#include <exec/memory.h>
#include <exec/resident.h>
#include <utility/tagitem.h>
#include <proto/arossupport.h>
#include <proto/exec.h>

#include <bootconsole.h>
#include <inttypes.h>
#include <string.h>

#include "boot_utils.h"
#include "kernel_base.h"
#include "kernel_intern.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
#include "kernel_mmap.h"
#include "kernel_romtags.h"
#include "smp.h"
#include "tls.h"
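/*
 * Debug output is compiled out by default. Assumption: redefining D(x) as x
 * (and likewise DSTACK(x)) re-enables the bug() logging wrapped by these
 * macros throughout this file.
 */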
#define D(x)
#define DSTACK(x)

static APTR core_AllocBootTLS(struct KernBootPrivate *);
static APTR core_AllocBootTSS(struct KernBootPrivate *);
static APTR core_AllocBootGDT(struct KernBootPrivate *);
static APTR core_AllocBootIDT(struct KernBootPrivate *);
/* Common IBM PC memory layout (64-bit version) */
static const struct MemRegion PC_Memory[] =
{
    /*
     * Low memory has a slightly lower priority:
     * - This helps the kernel/exec locate its MemHeader.
     * - We explicitly need low memory for SMP bootstrap.
     */
    {0x000000000, 0x000100000, "Low memory"    , -6, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP|MEMF_31BIT|MEMF_24BITDMA},
    {0x000100000, 0x001000000, "ISA DMA memory", -5, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP|MEMF_31BIT|MEMF_24BITDMA},
    /*
     * 1. 64-bit machines can expose RAM at addresses up to 0xD0000000 (giving 3.5 GB total).
     *    All MMIO sits beyond this border. AROS intentionally specifies a 4GB limit, in case some
     *    devices expose even more RAM in this space. This allows all the RAM to be usable.
     * 2. AROS has MEMF_31BIT (compatible with MorphOS). This likely originated from the assumption
     *    that MMIO starts at 0x80000000 (which is true at least for the Pegasos PPC), though on AROS it
     *    is used to ensure memory allocations lie within the low 32-bit address space.
     */
    {0x001000000, 0x080000000, "31-bit memory" ,  0, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP|MEMF_31BIT},
    {0x080000000, -1         , "High memory"   , 10, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_FAST},
    {0          , 0          , NULL            ,  0, 0}
};
static ULONG allocator = ALLOCATOR_TLSF;

/*
 * Boot-time global variables.
 * __KernBootPrivate needs to survive across warm reboots, so it's put into .data.
 * SysBase is intentionally put into .rodata. This way we prevent it from being modified.
 */
__attribute__((section(".data"))) struct KernBootPrivate *__KernBootPrivate = NULL;
__attribute__((section(".data"))) IPTR kick_highest = 0;
__attribute__((section(".rodata"))) struct ExecBase *SysBase = NULL;
static void boot_start(struct TagItem *msg);
static char boot_stack[];

/*
 * This is where our kernel starts.
 * First we clear the BSS section, then switch the stack pointer to our temporary stack
 * (which is itself located in BSS). While we are here, the stack is actually
 * located inside our bootstrap, and it's safe to use it a little bit.
 */
IPTR __startup start64(struct TagItem *msg, ULONG magic)
{
    /* Anti-command-line-run protector */
    if (magic == AROS_BOOT_MAGIC)
    {
        /* Run the kickstart from the boot_start() routine. */
        core_Kick(msg, boot_start);
    }

    return -1;
}
/*
 * This code is executed only once, after the kickstart is loaded by the bootstrap.
 * Its main job is to initialize the early debugging console ASAP in order to be able
 * to see what happens. This deals with both serial and on-screen consoles.
 *
 * The console mirror is placed at the end of the bootstrap's protected area. We must not
 * overwrite it because it contains the boot-time GDT, taglist, and some other structures.
 *
 * The default address is bootstrap start + 4KB, just in case.
 */
static void boot_start(struct TagItem *msg)
{
    fb_Mirror = (void *)LibGetTagData(KRN_ProtAreaEnd, 0x101000, msg);
    con_InitTagList(msg);

    bug("AROS64 - The AROS Research OS\n");
    bug("64-bit ");
#if defined(__AROSEXEC_SMP__)
    bug("SMP ");
#endif
    bug("build. Compiled %s\n", __DATE__);
    D(bug("[Kernel] boot_start: Jumped into kernel.resource @ %p [stub @ %p].\n", boot_start, start64));

    kernel_cstart(msg);
}
/*
 * This routine actually launches the kickstart. It's called either upon first start or upon warm reboot.
 * The only assumption is that the stack is outside .bss. For both cases this is true:
 * 1. First boot  - the stack is located inside the bootstrap.
 * 2. Warm reboot - the stack is located in the supervisor area (__KernBootPrivate->SystemStack).
 */
void core_Kick(struct TagItem *msg, void *target)
{
    const struct TagItem *bss = LibFindTagItem(KRN_KernelBss, msg);

    /* First clear .bss... */
    if (bss)
        __clear_bss((const struct KernelBSS *)bss->ti_Data);

    /*
     * ... then switch to the initial stack and jump to the target address.
     * We set rbp to 0 and use call here in order to get correct stack traces
     * if the boot task crashes. Otherwise the backtrace goes beyond this location
     * into memory areas with undefined contents.
     */
    asm volatile("movq %1, %%rsp\n\t"
                 "movq $0, %%rbp\n\t"
                 "call *%2\n"::"D"(msg), "r"(boot_stack + STACK_SIZE), "r"(target));
}
/*
 * This is the main entry point.
 * We run from here both at first boot and upon reboot.
 */
void kernel_cstart(const struct TagItem *start_msg)
{
    struct MinList memList;
    struct TagItem *msg = (struct TagItem *)start_msg;
    struct MemHeader *mh, *mh2;
    struct mb_mmap *mmap = NULL;
    IPTR mmap_len = 0, addr = 0, klo = 0, memtop = 0;
    struct TagItem *tag;
#if defined(__AROSEXEC_SMP__)
    struct X86SchedulerPrivate *scheduleData;
#endif
    UWORD *ranges[] =
    {
        NULL,
        NULL,
        (UWORD *)-1
    };

    /* Enable fxsave/fxrstor */
    wrcr(cr4, rdcr(cr4) | _CR4_OSFXSR | _CR4_OSXMMEXCPT);
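    /*
     * CR4.OSFXSR enables the FXSAVE/FXRSTOR instructions and SSE, and
     * CR4.OSXMMEXCPT enables unmasked SIMD floating-point exceptions (#XM).
     * Without these bits set, code could not safely use SSE registers.
     */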
    D(bug("[Kernel] %s: Boot data: 0x%p\n", __func__, __KernBootPrivate));
    DSTACK(bug("[Kernel] %s: Boot stack: 0x%p - 0x%p\n", __func__, boot_stack, boot_stack + STACK_SIZE));

    if (__KernBootPrivate == NULL)
    {
        /* This is our first start. */
        struct vbe_mode *vmode = NULL;
        char *cmdline = NULL;
        IPTR khi;

        /* We need the highest KS address and the memory map to begin the work */
        khi      = LibGetTagData(KRN_KernelHighest, 0, msg);
        mmap     = (struct mb_mmap *)LibGetTagData(KRN_MMAPAddress, 0, msg);
        mmap_len = LibGetTagData(KRN_MMAPLength, 0, msg);

        if ((!khi) || (!mmap) || (!mmap_len))
        {
            krnPanic(NULL, "Incomplete information from the bootstrap\n"
                     "\n"
                     "Kickstart top: 0x%p\n"
                     "Memory map: address 0x%p, length %lu\n", khi, mmap, mmap_len);
        }

        /*
         * Our boot taglist is located just somewhere in memory. Additionally, it's very fragmented
         * (its linked data, like VBE information, were also placed just somewhere by GRUB).
         * Now we need some memory to gather these things together. This memory will be preserved
         * across warm restarts.
         * We know the bootstrap has reserved some space right beyond the kickstart. We take our highest
         * address and use the memory map to locate the topmost address of this area.
         */
        khi  = AROS_ROUNDUP2(khi + 1, sizeof(APTR));
        mmap = mmap_FindRegion(khi, mmap, mmap_len);

        if (!mmap)
        {
            krnPanic(NULL, "Inconsistent memory map or kickstart placement\n"
                     "Kickstart region not found");
        }

        if (mmap->type != MMAP_TYPE_RAM)
        {
            krnPanic(NULL, "Inconsistent memory map or kickstart placement\n"
                     "Reserved memory overwritten\n"
                     "Region 0x%p - 0x%p type %d\n"
                     "Kickstart top 0x%p", mmap->addr, mmap->addr + mmap->len - 1, mmap->type, khi);
        }

        /* Initialize the boot-time memory allocator */
        BootMemPtr   = (void *)khi;
        BootMemLimit = (void *)mmap->addr + mmap->len;

        D(bug("[Kernel] Bootinfo storage 0x%p - 0x%p\n", BootMemPtr, BootMemLimit));
        /*
         * Our boot taglist is placed by the bootstrap just somewhere in memory.
         * The first thing is to move it into some safe place.
         */

        /* This will relocate the taglist itself */
        RelocateBootMsg(msg);

        /*
         * Now relocate linked data.
         * Here we actually process only tags we know about and expect to get.
         * For example, we are not going to receive KRN_HostInterface or KRN_OpenfirmwareTree.
         */
        msg = BootMsg;
        while ((tag = LibNextTagItem(&msg)))
        {
            switch (tag->ti_Tag)
            {
            case KRN_KernelBss:
                RelocateBSSData(tag);
                break;

            case KRN_MMAPAddress:
                RelocateTagData(tag, mmap_len);
                break;

            case KRN_VBEModeInfo:
                RelocateTagData(tag, sizeof(struct vbe_mode));
                vmode = (struct vbe_mode *)tag->ti_Data;
                break;

            case KRN_VBEControllerInfo:
                RelocateTagData(tag, sizeof(struct vbe_controller));
                break;

            case KRN_CmdLine:
                RelocateStringData(tag);
                cmdline = (char *)tag->ti_Data;
                break;

            case KRN_BootLoader:
                RelocateStringData(tag);
                break;
            }
        }

        /* Now allocate KernBootPrivate */
        __KernBootPrivate = krnAllocBootMem(sizeof(struct KernBootPrivate));
        if (cmdline && vmode && vmode->phys_base && strstr(cmdline, "vesahack"))
        {
            bug("[Kernel] VESA debugging hack activated\n");

            /*
             * VESA hack.
             * It halves the screen height and points the debug framebuffer at the lower half.
             * This allows the VESA driver to use only the upper half of the screen, while
             * the lower half will still be used for debug output.
             */
            vmode->y_resolution >>= 1;

            __KernBootPrivate->debug_y_resolution = vmode->y_resolution;
            __KernBootPrivate->debug_framebuffer  = (void *)(unsigned long)vmode->phys_base + vmode->y_resolution * vmode->bytes_per_scanline;
        }

        if (cmdline && strstr(cmdline, "notlsf"))
            allocator = ALLOCATOR_STD;
    }
    /* We are x86-64, and we know we always have an APIC. */
    __KernBootPrivate->_APICBase = core_APIC_GetBase();

    /* Pre-allocate the boot TLS, GDT and TSS */
    if (!__KernBootPrivate->BOOTTLS)
        __KernBootPrivate->BOOTTLS = core_AllocBootTLS(__KernBootPrivate);
    if (!__KernBootPrivate->BOOTGDT)
        __KernBootPrivate->BOOTGDT = core_AllocBootGDT(__KernBootPrivate);
    if (!__KernBootPrivate->TSS)
        __KernBootPrivate->TSS = core_AllocBootTSS(__KernBootPrivate);

    /* Set up the GDT */
    core_SetupGDT(__KernBootPrivate, 0,
                  __KernBootPrivate->BOOTGDT,
                  __KernBootPrivate->BOOTTLS,
                  __KernBootPrivate->TSS);

    if (!__KernBootPrivate->SystemStack)
    {
        /*
         * Allocate our supervisor stack from boot-time memory.
         * It will be protected from the user's intervention.
         * We actually allocate three stacks: panic, supervisor, ring 1.
         * Note that we do the actual allocation only once. The region is kept
         * in __KernBootPrivate, which survives warm reboots.
         */
        __KernBootPrivate->SystemStack = (IPTR)krnAllocBootMem(STACK_SIZE * 3);

        DSTACK(bug("[Kernel] %s: Allocated supervisor stack 0x%p - 0x%p\n",
                   __func__,
                   __KernBootPrivate->SystemStack,
                   __KernBootPrivate->SystemStack + STACK_SIZE * 3));
    }

    bug("[Kernel] %s: launching on BSP APIC ID %03u\n", __func__, core_APIC_GetID(__KernBootPrivate->_APICBase));
    bug("[Kernel] %s: apicbase : 0x%p\n", __func__, __KernBootPrivate->_APICBase);
    bug("[Kernel] %s: GDT      : 0x%p\n", __func__, __KernBootPrivate->BOOTGDT);
    bug("[Kernel] %s: TLS      : 0x%p\n", __func__, __KernBootPrivate->BOOTTLS);
    bug("[Kernel] %s: TSS      : 0x%p\n", __func__, __KernBootPrivate->TSS);
    /* Load the TSS and GDT */
    core_CPUSetup(0, __KernBootPrivate->BOOTGDT, __KernBootPrivate->SystemStack);

    D(bug("[Kernel] %s: preparing interrupt vectors\n", __func__));
    /* Set up the IDT */
    __KernBootPrivate->BOOTIDT = core_AllocBootIDT(__KernBootPrivate);
    D(bug("[Kernel] %s: IDT      : 0x%p\n", __func__, __KernBootPrivate->BOOTIDT));
    core_SetupIDT(0, (apicidt_t *)__KernBootPrivate->BOOTIDT);

    /* Set up the MMU */
    // Re-read the mmap pointer, since we have modified it previously...
    mmap = (struct mb_mmap *)LibGetTagData(KRN_MMAPAddress, 0, BootMsg);
    memtop = mmap_LargestAddress(mmap, mmap_len);
    D(bug("[Kernel] %s: memtop @ 0x%p\n", __func__, memtop));
    core_SetupMMU(&__KernBootPrivate->MMU, memtop);
    /*
     * Here we have ended all boot-time allocations.
     * We won't do them again, for example on warm reboot. All our areas are stored in struct KernBootPrivate.
     * We are going to make this area read-only and reset-proof.
     */
    if (!kick_highest)
    {
        D(bug("[Kernel] Boot-time setup complete\n"));
        kick_highest = AROS_ROUNDUP2((IPTR)BootMemPtr, PAGE_SIZE);
    }

    D(bug("[Kernel] End of kickstart area 0x%p\n", kick_highest));
    /*
     * Obtain the needed data from the boot taglist.
     * We need to do this even on first boot, because the taglist and its data
     * have been moved to the permanent storage.
     */
    msg = BootMsg;
    while ((tag = LibNextTagItem(&msg)))
    {
        switch (tag->ti_Tag)
        {
        case KRN_KernelBase:
            /*
             * KRN_KernelBase is actually the border between the read-only
             * (code) and read-write (data) sections of the kickstart.
             * The read-write section goes to lower addresses from this one,
             * so we align it upwards in order not to make part of the RW data
             * read-only.
             */
            addr = AROS_ROUNDUP2(tag->ti_Data, PAGE_SIZE);
            break;

        case KRN_KernelLowest:
            klo = AROS_ROUNDDOWN2(tag->ti_Data, PAGE_SIZE);
            break;

        case KRN_MMAPAddress:
            mmap = (struct mb_mmap *)tag->ti_Data;
            break;

        case KRN_MMAPLength:
            mmap_len = tag->ti_Data;
            break;
        }
    }

    /* Sanity check */
    if ((!klo) || (!addr))
    {
        krnPanic(NULL, "Incomplete information from the bootstrap\n"
                 "\n"
                 "Kickstart lowest 0x%p, base 0x%p\n", klo, addr);
    }
    /*
     * Explore the memory map and create MemHeaders.
     * We reserve one page (PAGE_SIZE) at the zero address. We will protect it.
     */
    NEWLIST(&memList);
    mmap_InitMemory(mmap, mmap_len, &memList, klo, kick_highest, PAGE_SIZE, PC_Memory, allocator);

    D(bug("[Kernel] kernel_cstart: Booting exec.library...\n"));

    /*
     * mmap_InitMemory() adds MemHeaders to the list in the order they were created,
     * i.e. the highest addresses are added last.
     * Take the highest region in order to create SysBase in it.
     */
    mh = (struct MemHeader *)REMTAIL(&memList);
    D(bug("[Kernel] Initial MemHeader: 0x%p - 0x%p (%s)\n", mh->mh_Lower, mh->mh_Upper, mh->mh_Node.ln_Name));
    if (SysBase)
    {
        D(bug("[Kernel] Got old SysBase 0x%p...\n", SysBase));
        /*
         * Validate the existing SysBase pointer.
         * Here we check that it refers to a valid existing memory region.
         * Checksums etc. are checked in arch-independent code in exec.library.
         * It's enough to use only the size of the public part. Anyway, SysBase will be
         * reallocated by PrepareExecBase(); it will just keep some data from the
         * public part (KickMemPtr, KickTagPtr and capture vectors).
         */
        if (!mmap_ValidateRegion((unsigned long)SysBase, sizeof(struct ExecBase), mmap, mmap_len))
        {
            D(bug("[Kernel] ... invalidated\n"));
            SysBase = NULL;
        }
    }

    /* This handles failures itself */
    ranges[0] = (UWORD *)klo;
    ranges[1] = (UWORD *)kick_highest;
    krnPrepareExecBase(ranges, mh, BootMsg);

    krnCreateROMHeader("Kickstart ROM", (APTR)klo, (APTR)kick_highest);

#if defined(__AROSEXEC_SMP__)
    D(bug("[Kernel] Allocating CPU #0 Scheduling Data\n"));
    scheduleData = AllocMem(sizeof(struct X86SchedulerPrivate), MEMF_PUBLIC|MEMF_CLEAR);
    if (!scheduleData)
        krnPanic(KernelBase, "Failed to Allocate Boot Processor Scheduling Data!");

    core_InitScheduleData(scheduleData);

    TLS_SET(ScheduleData, scheduleData);
    D(bug("[Kernel] Scheduling Data @ 0x%p\n", TLS_GET(ScheduleData)));
#endif
    /*
     * Now we have a working exec.library memory allocator.
     * Move the console mirror buffer away from unused memory.
     * WARNING!!! Do not report anything in the debug log before this is done. Remember that sequential
     * AllocMem()s return sequential blocks! And right beyond our allocated area there will be a MemChunk.
     * Between krnPrepareExecBase() and this AllocMem(), upon warm reboot, the console mirror buffer is set
     * to an old value right above ExecBase. During krnPrepareExecBase() a MemChunk is built there,
     * which can be overwritten by the bootconsole, especially if the output scrolls.
     */
    if (scr_Type == SCR_GFX)
    {
        char *mirror = AllocMem(scr_Width * scr_Height, MEMF_PUBLIC);

        fb_SetMirror(mirror);
    }

    D(bug("[Kernel] Created SysBase at 0x%p (pointer at 0x%p), MemHeader 0x%p\n", SysBase, &SysBase, mh));

    /* Block all user access to the zero page */
    core_ProtKernelArea(0, PAGE_SIZE, 1, 0, 0);

    /* Store important private data */
    TLS_SET(SysBase, SysBase);

    /* Provide information about our supervisor stack. Useful at least for diagnostics. */
    SysBase->SysStkLower = (APTR)__KernBootPrivate->SystemStack;
    SysBase->SysStkUpper = (APTR)__KernBootPrivate->SystemStack + STACK_SIZE * 3;
    /*
     * Make the kickstart code area read-only.
     * We do this only after ExecBase creation because the SysBase pointer is put
     * into .rodata. This way we prevent it from occasional modification by buggy software.
     */
    core_ProtKernelArea(addr, kick_highest - addr, 1, 0, 1);

    /* Transfer the rest of the memory list into SysBase */
    D(bug("[Kernel] Transferring memory list into SysBase...\n"));
    for (mh = (struct MemHeader *)memList.mlh_Head; mh->mh_Node.ln_Succ; mh = mh2)
    {
        mh2 = (struct MemHeader *)mh->mh_Node.ln_Succ;

        D(bug("[Kernel] * 0x%p - 0x%p (%s pri %d)\n", mh->mh_Lower, mh->mh_Upper, mh->mh_Node.ln_Name, mh->mh_Node.ln_Pri));
        Enqueue(&SysBase->MemList, &mh->mh_Node);
    }

    /*
     * RTF_SINGLETASK residents are called with supervisor privilege level.
     * The original AmigaOS(tm) does the same; some Amiga hardware expansion ROMs
     * rely on it. Here we continue the tradition, because it's useful for
     * acpica.library (which needs to look for the RSDP in the first 1M).
     */
    InitCode(RTF_SINGLETASK, 0);

    /*
     * After InitCode(RTF_SINGLETASK) we may have acpica.library available,
     * so perform some basic initialization.
     */
    PlatformPostInit();

    /* Drop privileges down to user mode before calling RTF_COLDSTART */
    D(bug("[Kernel] Leaving supervisor mode\n"));
    krnLeaveSupervisorRing(FLAGS_INTENABLED);

    /*
     * We are fully done. Run exec.library and the rest.
     * exec.library will be the first resident to run. It will enable interrupts and multitasking for us.
     */
    InitCode(RTF_COLDSTART, 0);

    /* The above must not return */
    krnPanic(KernelBase, "System Boot Failed!");
}
/* Small delay routine used by the exec_cinit initializer */
asm("\ndelay:\t.short 0x00eb\n\tretq");
/* Our boot-time stack */
static char boot_stack[STACK_SIZE] __attribute__((aligned(16)));
void core_SetupGDT
(
    struct KernBootPrivate *__KernBootPrivate,
    apicid_t cpuNo,
    APTR gdtBase,
    APTR gdtTLS,
    APTR gdtTSS
)
{
    struct gdt_64bit *gdtPtr = (struct gdt_64bit *)gdtBase;
    struct tss_64bit *tssPtr = (struct tss_64bit *)gdtTSS;
    int i;

    D(bug("[Kernel] %s(%03u, 0x%p, 0x%p, 0x%p)\n", __func__, cpuNo, gdtBase, gdtTLS, gdtTSS));

    // TODO: ASSERT the GDT is aligned

    /* Supervisor segments */
    gdtPtr->super_cs.type       = 0x1a;     /* code segment */
    gdtPtr->super_cs.dpl        = 0;        /* supervisor level */
    gdtPtr->super_cs.p          = 1;        /* present */
    gdtPtr->super_cs.l          = 1;        /* long (64-bit) one */
    gdtPtr->super_cs.d          = 0;        /* must be zero */
    gdtPtr->super_cs.limit_low  = 0xffff;
    gdtPtr->super_cs.limit_high = 0xf;
    gdtPtr->super_cs.g          = 1;

    gdtPtr->super_ds.type       = 0x12;     /* data segment */
    gdtPtr->super_ds.dpl        = 0;        /* supervisor level */
    gdtPtr->super_ds.p          = 1;        /* present */
    gdtPtr->super_ds.l          = 1;        /* long (64-bit) one */
    gdtPtr->super_ds.d          = 1;
    gdtPtr->super_ds.limit_low  = 0xffff;
    gdtPtr->super_ds.limit_high = 0xf;
    gdtPtr->super_ds.g          = 1;

    /* User mode segments */
    gdtPtr->user_cs.type        = 0x1a;     /* code segment */
    gdtPtr->user_cs.dpl         = 3;        /* user level */
    gdtPtr->user_cs.p           = 1;        /* present */
    gdtPtr->user_cs.l           = 1;        /* long mode */
    gdtPtr->user_cs.d           = 0;        /* must be zero */
    gdtPtr->user_cs.limit_low   = 0xffff;
    gdtPtr->user_cs.limit_high  = 0xf;
    gdtPtr->user_cs.g           = 1;

    gdtPtr->user_cs32.type       = 0x1a;    /* code segment for legacy 32-bit code. NOT USED YET! */
    gdtPtr->user_cs32.dpl        = 3;       /* user level */
    gdtPtr->user_cs32.p          = 1;       /* present */
    gdtPtr->user_cs32.l          = 0;       /* 32-bit mode */
    gdtPtr->user_cs32.d          = 1;       /* 32-bit code */
    gdtPtr->user_cs32.limit_low  = 0xffff;
    gdtPtr->user_cs32.limit_high = 0xf;
    gdtPtr->user_cs32.g          = 1;

    gdtPtr->user_ds.type        = 0x12;     /* data segment */
    gdtPtr->user_ds.dpl         = 3;        /* user level */
    gdtPtr->user_ds.p           = 1;        /* present */
    gdtPtr->user_ds.l           = 1;        /* long mode */
    gdtPtr->user_ds.d           = 1;
    gdtPtr->user_ds.limit_low   = 0xffff;
    gdtPtr->user_ds.limit_high  = 0xf;
    gdtPtr->user_ds.g           = 1;
    for (i = 0; i < 16; i++)
    {
        const unsigned long tss_limit = sizeof(struct tss_64bit) * 16 - 1;

        /* Task State Segment */
        gdtPtr->tss[i].tss_low.type       = 0x09;   /* 64-bit TSS */
        gdtPtr->tss[i].tss_low.dpl        = 3;      /* User mode task */
        gdtPtr->tss[i].tss_low.p          = 1;      /* present */
        gdtPtr->tss[i].tss_low.l          = 1;      /* long mode */
        gdtPtr->tss[i].tss_low.d          = 1;
        gdtPtr->tss[i].tss_low.limit_low  = tss_limit;
        gdtPtr->tss[i].tss_low.base_low   = ((unsigned long)&tssPtr[i]) & 0xffff;
        gdtPtr->tss[i].tss_low.base_mid   = (((unsigned long)&tssPtr[i]) >> 16) & 0xff;
        gdtPtr->tss[i].tss_low.limit_high = (tss_limit >> 16) & 0x0f;
        gdtPtr->tss[i].tss_low.base_high  = (((unsigned long)&tssPtr[i]) >> 24) & 0xff;
        gdtPtr->tss[i].tss_high.base_ext  = 0;      /* is within 4GB :-D */
    }
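    /*
     * The GS segment base below is pointed at the boot TLS area: in long mode,
     * %gs-relative addressing is the conventional way to reach per-CPU/TLS
     * data, and the TLS_GET()/TLS_SET() macros used elsewhere in this file
     * are presumably implemented on top of this base.
     */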
    gdtPtr->gs.type      = 0x12;    /* data segment */
    gdtPtr->gs.dpl       = 3;       /* user level */
    gdtPtr->gs.p         = 1;       /* present */
    gdtPtr->gs.l         = 1;       /* long mode */
    gdtPtr->gs.d         = 1;
    gdtPtr->gs.base_low  = (intptr_t)gdtTLS & 0xffff;
    gdtPtr->gs.base_mid  = ((intptr_t)gdtTLS >> 16) & 0xff;
    gdtPtr->gs.base_high = ((intptr_t)gdtTLS >> 24) & 0xff;
    gdtPtr->gs.g         = 1;
}
void core_CPUSetup(apicid_t cpuNo, APTR cpuGDT, IPTR SystemStack)
{
    struct segment_selector cpuGDTsel;
    struct tss_64bit *tssBase = __KernBootPrivate->TSS;

    D(bug("[Kernel] %s(%03u, 0x%p, 0x%p)\n", __func__, cpuNo, cpuGDT, SystemStack));

    /*
     * At the moment two of the three stacks are reserved. The IST is not used (indexes == 0 in interrupt gates)
     * and ring 1 is not used either. However, the space pointed to by the IST is used as a temporary stack
     * for the warm restart routine.
     */
    tssBase[cpuNo].ist1 = SystemStack + STACK_SIZE     - 16;    /* Interrupt stack entry 1 (failsafe) */
    tssBase[cpuNo].rsp0 = SystemStack + STACK_SIZE * 2 - 16;    /* Ring 0 (Supervisor) */
    tssBase[cpuNo].rsp1 = SystemStack + STACK_SIZE * 3 - 16;    /* Ring 1 (reserved) */

    D(bug("[Kernel] %s[%03u]: Reloading:\n", __func__, cpuNo));
    D(bug("[Kernel] %s[%03u]: CPU GDT @ 0x%p\n", __func__, cpuNo, cpuGDT));
    D(bug("[Kernel] %s[%03u]: CPU TSS @ 0x%p\n", __func__, cpuNo, &tssBase[cpuNo]));

    cpuGDTsel.size = sizeof(struct gdt_64bit) - 1;
    cpuGDTsel.base = (unsigned long)cpuGDT;
    asm volatile ("lgdt %0"::"m"(cpuGDTsel));
655 asm volatile ("ltr %w0"::"r"(TASK_SEG + (cpuNo << 4)));
656 asm volatile ("mov %0,%%gs"::"a"(USER_GS));
/* Boot-time allocation routines... */

static APTR core_AllocBootTLS(struct KernBootPrivate *__KernBootPrivate)
{
    tls_t *tlsPtr;

    tlsPtr = (tls_t *)krnAllocBootMem(sizeof(tls_t));

    return (APTR)tlsPtr;
}

static APTR core_AllocBootTSS(struct KernBootPrivate *__KernBootPrivate)
{
    struct tss_64bit *tssPtr;

    tssPtr = krnAllocBootMemAligned(sizeof(struct tss_64bit) * 16, 128);

    return (APTR)tssPtr;
}

static APTR core_AllocBootIDT(struct KernBootPrivate *__KernBootPrivate)
{
    if (!__KernBootPrivate->BOOTIDT)
        __KernBootPrivate->BOOTIDT = krnAllocBootMemAligned(sizeof(struct int_gate_64bit) * 256, 256);

    return (APTR)__KernBootPrivate->BOOTIDT;
}

static APTR core_AllocBootGDT(struct KernBootPrivate *__KernBootPrivate)
{
    struct gdt_64bit *gdtPtr;

    gdtPtr = (struct gdt_64bit *)krnAllocBootMemAligned(sizeof(struct gdt_64bit), 128);

    return (APTR)gdtPtr;
}