/*
 * (scrape metadata, not part of the source:)
 * Wanderer/Info: fix compilation
 * [AROS.git] / arch / x86_64-pc / bootstrap / cpu.c
 * blob 6c8b40bb1624bf1e46764339461be1a81d475e78
 */
1 /*
2 Copyright © 1995-2014, The AROS Development Team. All rights reserved.
3 $Id$
4 */
6 #include <aros/kernel.h>
7 #include <asm/x86_64/cpu.h>
9 #include "bootstrap.h"
10 #include "support.h"
/* Segment selectors — byte offsets of the descriptors within the GDT */
#define SEG_SUPER_CS  0x08
#define SEG_SUPER_DS  0x10
#define SEG_USER_CS32 0x18
#define SEG_USER_CS64 0x28
#define SEG_USER_DS   0x20
#define SEG_TSS       0x30
20 /* Global descriptor table */
21 static struct
23 struct segment_desc seg0; /* seg 0x00 */
24 struct segment_desc super_cs; /* seg 0x08 */
25 struct segment_desc super_ds; /* seg 0x10 */
26 } GDT __attribute__((used,aligned(128),section(".bss.aros.tables")));
28 /* Data used to load GDTR */
29 const struct segment_selector GDT_sel =
31 sizeof(GDT)-1,
32 (unsigned long)&GDT
35 /* Far jump detination specification (address and segment selector */
36 static struct
38 void *off;
39 unsigned short seg;
40 } __attribute__((packed)) KernelTarget =
42 (void*)KERNEL_TARGET_ADDRESS,
43 SEG_SUPER_CS
47 * The MMU pages and directories. They are stored at fixed location and may be either reused in the
48 * 64-bit kernel, or replaced by it. Four PDE directories (PDE2M structures) are enough to map whole
49 * 4GB address space.
51 static struct PML4E PML4[512] __attribute__((used,aligned(4096),section(".bss.aros.tables")));
52 static struct PDPE PDP[512] __attribute__((used,aligned(4096),section(".bss.aros.tables")));
53 static struct PDE2M PDE[4][512] __attribute__((used,aligned(4096),section(".bss.aros.tables")));
56 * The 64-bit long mode may be activated only, when MMU paging is enabled. Therefore the basic
57 * MMU tables have to be prepared first. This routine uses 6 tables (2048 + 5 entries) in order
58 * to map the first 4GB of address space as user-accessible executable RW area.
60 * This mapping may be changed later by the 64-bit kernel, in order to provide separate address spaces,
61 * protect kernel from being overwritten and so on and so forth.
63 * To simplify things down we will use 2MB memory page size. In this mode the address is broken up into 4 fields:
64 * - Bits 63–48 sign extension of bit 47 as required for canonical address forms.
65 * - Bits 47–39 index into the 512-entry page-map level-4 table.
66 * - Bits 38–30 index into the 512-entry page-directory-pointer table.
67 * - Bits 29–21 index into the 512-entry page-directory table.
68 * - Bits 20–0 byte offset into the physical page.
69 * Let's remember that our topmost address is 0xFFFFF000, as specified by GDT.
71 void setup_mmu(void)
73 int i;
74 struct PDE2M *pdes[] = { &PDE[0][0], &PDE[1][0], &PDE[2][0], &PDE[3][0] };
76 D(kprintf("[BOOT] Setting up MMU, kickstart base 0x%p\n", kick_base));
77 D(kprintf("[BOOT] cr0: 0x%p cr3: 0x%p cr4: 0x%p\n", rdcr(cr0), rdcr(cr3), rdcr(cr4)));
79 D(kprintf("[BOOT] Setting up descriptor tables.\n"));
81 /* Supervisor code segment */
82 GDT.super_cs.type = 0x1a; /* code, non-conforming, readable */
83 GDT.super_cs.dpl = 0; /* supervisor level (ring 0) */
84 GDT.super_cs.p = 1; /* present */
85 GDT.super_cs.l = 1; /* long mode enabled */
86 GDT.super_cs.d = 0; /* must be zero for long mode */
87 GDT.super_cs.limit_low = 0xffff; /* Limit is actually 0xFFFFF000 */
88 GDT.super_cs.limit_high = 0xf;
89 GDT.super_cs.g = 1; /* Limit is in 4K pages */
90 GDT.super_cs.base_low = 0; /* Segment starts at zero address */
91 GDT.super_cs.base_mid = 0;
92 GDT.super_cs.base_high = 0;
94 /* Supervisor data segment. Actually ignored in long mode. */
95 GDT.super_ds.type = 0x12; /* data, expand up, writable */
96 GDT.super_ds.dpl = 0; /* supervisor level */
97 GDT.super_ds.p = 1; /* present */
98 GDT.super_ds.limit_low = 0xffff; /* Limit = 0xFFFFF000 */
99 GDT.super_ds.limit_high = 0xf;
100 GDT.super_ds.g = 1; /* 4K granularity */
101 GDT.super_ds.d = 1; /* 32-bit operands */
102 GDT.super_ds.base_low = 0; /* Start at zero address */
103 GDT.super_ds.base_mid = 0;
104 GDT.super_ds.base_high = 0;
106 D(kprintf("[BOOT] Mapping first 4G area with MMU\n"));
107 D(kprintf("[BOOT] PML4 0x%p, PDP 0x%p, PDE 0x%p\n", PML4, PDP, PDE));
110 * Page map level 4 Entry.
111 * Since we actually use only 32-bit addresses, we need only one entry
112 * number zero (bits 47-39 of our address are zeroes).
114 PML4[0].p = 1; /* present in physical RAM */
115 PML4[0].rw = 1; /* read/write access */
116 PML4[0].us = 1; /* accessible on user level */
117 PML4[0].pwt = 0; /* write-through cache mode */
118 PML4[0].pcd = 0; /* caching enabled */
119 PML4[0].a = 0; /* clear access bit (just in case) */
120 PML4[0].mbz = 0; /* reserved, must be zero */
121 PML4[0].avl = 0; /* user-defined flags, clear them */
122 PML4[0].base_low = (unsigned int)PDP >> 12; /* Base address of directory pointer table to use */
123 PML4[0].nx = 0; /* code execution allowed */
124 PML4[0].avail = 0; /* more user-defined flags */
125 PML4[0].base_high = 0;
128 * Page directory pointer entries.
129 * Our address contains usable bits 30 and 31, so there are four of them.
131 for (i=0; i < 4; i++)
133 int j;
135 D(kprintf("[BOOT] PDE[%u] 0x%p\n", i, pdes[i]));
138 * Set the PDP entry up and point to the PDE table.
139 * Field meanings are analogous to PML4, just 'base' points to page directory tables
141 PDP[i].p = 1;
142 PDP[i].rw = 1;
143 PDP[i].us = 1;
144 PDP[i].pwt = 0;
145 PDP[i].pcd = 0;
146 PDP[i].a = 0;
147 PDP[i].mbz = 0;
148 PDP[i].base_low = (unsigned int)pdes[i] >> 12;
149 PDP[i].nx = 0;
150 PDP[i].avail = 0;
151 PDP[i].base_high = 0;
153 for (j=0; j < 512; j++)
155 /* Build a complete PDE set (512 entries) for every PDP entry */
156 struct PDE2M *PDE = pdes[i];
158 PDE[j].p = 1;
159 PDE[j].rw = 1;
160 PDE[j].us = 1;
161 PDE[j].pwt = 0;
162 PDE[j].pcd = 0;
163 PDE[j].a = 0;
164 PDE[j].d = 0; /* Clear write tracking bit */
165 PDE[j].g = 0; /* Page is global */
166 PDE[j].pat = 0; /* Most significant PAT bit */
167 PDE[j].ps = 1; /* It's PDE (not PTE) and page size will be 2MB (after we enable PAE) */
168 PDE[j].base_low = ((i << 30) + (j << 21)) >> 13; /* Base address of the physical page. This is 1:1 mapping. */
169 PDE[j].avail = 0;
170 PDE[j].nx = 0;
171 PDE[j].base_high = 0;
175 tag->ti_Tag = KRN_GDT;
176 tag->ti_Data = KERNEL_OFFSET | (unsigned long)&GDT;
177 tag++;
179 tag->ti_Tag = KRN_PL4;
180 tag->ti_Data = KERNEL_OFFSET | (unsigned long)&PML4;
181 tag++;
185 * This tiny procedure sets the complete 64-bit environment up - it loads the descriptors,
186 * enables 64-bit mode, loads MMU tables and through paging it activates the 64-bit long mode.
188 * After that it is perfectly safe to jump into the pure 64-bit kernel.
190 void kick(void *kick_base, struct TagItem64 *km)
192 unsigned int v1 = 0, v2 = 0, v3 = 0, v4 = 0;
194 cpuid(0x80000000, v1, v2, v3, v4);
195 if (v1 > 0x80000000)
197 cpuid(0x80000001, v1, v2, v3, v4);
198 if (v4 & (1 << 29))
200 D(kprintf("[BOOT] x86-64 CPU ok\n"));
202 KernelTarget.off = kick_base;
204 asm volatile ("lgdt %0"::"m"(GDT_sel));
205 D(kprintf("[BOOT] GDTR loaded\n"));
207 /* Enable PAE */
208 wrcr(cr4, _CR4_PAE | _CR4_PGE);
209 D(kprintf("[BOOT] PAE is on\n"));
211 /* enable pages */
212 wrcr(cr3, &PML4);
213 D(kprintf("[BOOT] cr3 loaded\n"));
215 /* enable long mode */
216 rdmsr(EFER, &v1, &v2);
217 v1 |= _EFER_LME;
218 wrmsr(EFER, v1, v2);
219 D(kprintf("[BOOT] Long mode is on\n"));
221 /* enable paging and activate long mode */
222 wrcr(cr0, _CR0_PG | _CR0_PE);
224 kprintf("[BOOT] Leaving 32-bit environment. LJMP $%x,$%p\n\n", SEG_SUPER_CS, KernelTarget.off);
225 asm volatile("ljmp *%0"::"m"(KernelTarget),"D"(km),"S"(AROS_BOOT_MAGIC));
229 kprintf("Your processor is not x86-64 compatible\n");