2 * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
6 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
7 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
9 * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
13 #include <linux/linkage.h>
14 #include <linux/threads.h>
16 #include <asm/segment.h>
19 #include <asm/cache.h>
21 /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
22 * because we need identity-mapped pages on setup so define __START_KERNEL to
23 * 0x100000 for this stage
29 /* %bx: 1 if coming from smp trampoline on secondary cpu */
33 * At this point the CPU runs in 32bit protected mode (CS.D = 1) with
34 * paging disabled and the point of this file is to switch to 64bit
35 * long mode with a kernel mapping for the kernel and to jump into the
36 * kernel virtual addresses.
37 * There is no stack until we set one up.
/*
 * Early 32bit entry path.  NOTE(review): this excerpt is missing interior
 * lines (the fused original numbering jumps); only the visible
 * instructions are documented below.
 */
40 movl %ebx,%ebp /* Save trampoline flag */
/* Kernel data selector in %eax; presumably copied into the data segment
 * registers on lines elided from this excerpt -- verify. */
42 movl $__KERNEL_DS,%eax
45 /* If the CPU doesn't support CPUID this will double fault.
46 * Unfortunately it is hard to check for CPUID without a stack.
49 /* Check if extended functions are implemented */
/* CPUID leaf 0x80000000 reports the highest extended leaf in %eax; the
 * cpuid instruction itself is elided from this excerpt. */
50 movl $0x80000000, %eax
/* If %eax did not grow past 0x80000000 there are no extended leaves and
 * long mode cannot be probed; the conditional branch (presumably to
 * no_long_mode) is elided here. */
52 cmpl $0x80000000, %eax
54 /* Check if long mode is implemented */
63 * Prepare for entering 64bits mode
66 /* Enable PAE mode and PGE */
72 /* Setup early boot stage 4 level pagetables */
/* Physical address of the boot PML4: the kernel virtual base is
 * subtracted because paging is still off.  Presumably loaded into %cr3
 * on a line elided from this excerpt. */
73 movl $(init_level4_pgt - __START_KERNEL_map), %eax
76 /* Setup EFER (Extended Feature Enable Register) */
80 /* Enable Long Mode */
82 /* Enable System Call */
85 /* No Execute supported? */
91 /* Make changes effective */
/* Build the new CR0 value in %eax bit by bit; the bit numbers below are
 * the architectural CR0 flag positions (PG=31, PE=0, MP=1, ET=4, NE=5,
 * WP=16, AM=18).  The mov %eax,%cr0 that commits it -- at which point
 * EFER.LME + CR0.PG activates long mode -- is elided from this excerpt. */
95 btsl $31, %eax /* Enable paging and in turn activate Long Mode */
96 btsl $0, %eax /* Enable protected mode */
97 btsl $1, %eax /* Enable MP */
98 btsl $4, %eax /* Enable ET */
99 btsl $5, %eax /* Enable NE */
100 btsl $16, %eax /* Enable WP */
101 btsl $18, %eax /* Enable AM */
102 /* Make changes effective */
/* Near jump to the immediately following label: flushes the prefetch
 * queue so no stale pre-mode-switch bytes execute. */
104 jmp reach_compatibility_mode
105 reach_compatibility_mode:
108 * At this point we're in long mode but in 32bit compatibility mode
109 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
110 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
111 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
/* %bp still holds the trampoline flag saved at entry: nonzero means we
 * are a secondary CPU booted via the SMP trampoline.  The conditional
 * branch taken on that result is elided from this excerpt. */
114 testw %bp,%bp /* secondary CPU? */
117 /* Load new GDT with the 64bit segment using 32bit descriptor */
/* Physical (identity-mapped) address of the pGDT32 descriptor; the lgdt
 * that consumes it is elided. */
118 movl $(pGDT32 - __START_KERNEL_map), %eax
/* Physical address of the far-jump vector used to enter a CS.L=1 code
 * segment; the indirect ljmp itself is elided from this excerpt. */
122 movl $(ljumpvector - __START_KERNEL_map), %eax
123 /* Finally jump in 64bit mode */
/* First visible 64bit instruction: load the initial kernel stack
 * pointer RIP-relative from the init_rsp quad defined below. */
129 movq init_rsp(%rip),%rsp
131 /* zero EFLAGS after setting rsp */
136 * We must switch to a new descriptor in kernel space for the GDT
137 * because soon the kernel won't have access anymore to the userspace
138 * addresses where we're currently running on. We have to do that here
139 * because in 32bit we couldn't load a 64bit linear address.
144 * Setup up a dummy PDA. this is just for some early bootup code
145 * that does in_interrupt()
/* MSR number in %ecx and the target base in %rax set up for a wrmsr
 * (elided from this excerpt) that points GS base at empty_zero_page so
 * early per-CPU accesses read zeroes. */
147 movl $MSR_GS_BASE,%ecx
148 movq $empty_zero_page,%rax
153 /* set up data segments. actually 0 would do too */
/* Kernel data selector; the moves into %ds/%es/etc. are elided. */
154 movl $__KERNEL_DS,%eax
159 /* esi is pointer to real mode structure with interesting info.
163 /* Finally jump to run C code and to be on real kernel address
164 * Since we are running on identity-mapped space we have to jump
165 * to the full 64bit address , this is only possible as indirect
/* Load the C entry point RIP-relative; the indirect jmp through %rax
 * (elided here) is what lands us at the full 64bit kernel address. */
168 movq initial_code(%rip),%rax
171 /* SMP bootup changes these two */
/* NOTE(review): the labels themselves (presumably initial_code: and
 * init_rsp:) are elided from this excerpt.  The first quad is the C
 * entry point fetched via initial_code(%rip) above; the second is the
 * boot stack loaded into %rsp -- top of init_thread_union minus one
 * 8-byte slot. */
174 .quad x86_64_start_kernel
177 .quad init_thread_union+THREAD_SIZE-8
182 /* This isn't an x86-64 CPU so hang */
/* pGDT32: 48bit GDT pseudo-descriptor for the 32bit-reachable GDT --
 * 16bit limit followed by a 32bit base; the base must be a physical
 * address, hence the subtraction of the kernel virtual offset. */
189 .word gdt32_end-gdt_table32
190 .long gdt_table32-__START_KERNEL_map
/* ljumpvector: far-jump target offset (reach_long64 as a physical
 * address); the selector word of the vector is elided from this
 * excerpt. */
194 .long reach_long64-__START_KERNEL_map
201 * This default setting generates an ident mapping at address 0x100000
202 * and a mapping for the kernel that precisely maps virtual address
203 * 0xffffffff80000000 to physical address 0x000000. (always using
204 * 2Mbyte large pages provided by PAE mode)
/* Each .quad is a page-table entry: physical address of the next-level
 * table OR'ed with flags (0x007 = present | writable | user).
 * NOTE(review): the .org/.fill padding between sparse entries is elided
 * from this excerpt (original numbering jumps). */
207 ENTRY(init_level4_pgt)
208 .quad 0x0000000000102007 /* -> level3_ident_pgt */
210 .quad 0x000000000010a007
212 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
213 .quad 0x0000000000103007 /* -> level3_kernel_pgt */
216 /* Kernel does not "know" about 4-th level of page tables. */
217 ENTRY(level3_ident_pgt)
218 .quad 0x0000000000104007
222 ENTRY(level3_kernel_pgt)
224 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
225 .quad 0x0000000000105007 /* -> level2_kernel_pgt */
/* Identity-mapping PMD: twenty 2MB large-page entries covering physical
 * 0..40MB.  Flag value 0x183 = present | writable | PS (2MB page) |
 * global; the first entry uses 0x283 (software-available bit instead of
 * global) -- presumably deliberate, verify against the flag definitions
 * in the headers. */
229 ENTRY(level2_ident_pgt)
230 /* 40MB for bootup. */
231 .quad 0x0000000000000283
232 .quad 0x0000000000200183
233 .quad 0x0000000000400183
234 .quad 0x0000000000600183
235 .quad 0x0000000000800183
236 .quad 0x0000000000A00183
237 .quad 0x0000000000C00183
238 .quad 0x0000000000E00183
239 .quad 0x0000000001000183
240 .quad 0x0000000001200183
241 .quad 0x0000000001400183
242 .quad 0x0000000001600183
243 .quad 0x0000000001800183
244 .quad 0x0000000001A00183
245 .quad 0x0000000001C00183
246 .quad 0x0000000001E00183
247 .quad 0x0000000002000183
248 .quad 0x0000000002200183
249 .quad 0x0000000002400183
250 .quad 0x0000000002600183
251 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
/* NOTE(review): the temp_boot_pmds storage directives following the
 * .globl are elided from this excerpt. */
252 .globl temp_boot_pmds
/* Kernel-text PMD: same twenty 2MB entries as the identity PMD but
 * reached through the high kernel mapping, so virtual
 * 0xffffffff80000000+ maps to physical 0.  0x183 = present | writable |
 * PS (2MB page) | global. */
257 ENTRY(level2_kernel_pgt)
258 /* 40MB kernel mapping. The kernel code cannot be bigger than that.
259 When you change this change KERNEL_TEXT_SIZE in page.h too. */
260 /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
261 .quad 0x0000000000000183
262 .quad 0x0000000000200183
263 .quad 0x0000000000400183
264 .quad 0x0000000000600183
265 .quad 0x0000000000800183
266 .quad 0x0000000000A00183
267 .quad 0x0000000000C00183
268 .quad 0x0000000000E00183
269 .quad 0x0000000001000183
270 .quad 0x0000000001200183
271 .quad 0x0000000001400183
272 .quad 0x0000000001600183
273 .quad 0x0000000001800183
274 .quad 0x0000000001A00183
275 .quad 0x0000000001C00183
276 .quad 0x0000000001E00183
277 .quad 0x0000000002000183
278 .quad 0x0000000002200183
279 .quad 0x0000000002400183
280 .quad 0x0000000002600183
281 /* Module mapping starts here */
/* Statically reserved pages.  NOTE(review): the .org/.fill directives
 * that actually size each page are elided from this excerpt.
 * empty_zero_page doubles as the dummy GS/PDA base set up earlier. */
285 ENTRY(empty_zero_page)
288 ENTRY(empty_bad_page)
291 ENTRY(empty_bad_pte_table)
294 ENTRY(empty_bad_pmd_table)
/* Extra level3 table whose entry 0 points at level2_kernel_pgt so that
 * __va translations work before pagetable_init runs (per the original
 * comment below). */
297 ENTRY(level3_physmem_pgt)
298 .quad 0x0000000000105007 /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
301 #ifdef CONFIG_ACPI_SLEEP
/* PML4 used when resuming from ACPI S3 sleep: same three entries as
 * init_level4_pgt above (ident map, and the kernel map at slot 511).
 * NOTE(review): the matching #endif and the .org/.fill padding between
 * the sparse slots are elided from this excerpt. */
302 ENTRY(wakeup_level4_pgt)
303 .quad 0x0000000000102007 /* -> level3_ident_pgt */
305 .quad 0x000000000010a007
307 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
308 .quad 0x0000000000103007 /* -> level3_kernel_pgt */
/* GDT pseudo-descriptor limit word; the 64bit base quad that follows it
 * is elided from this excerpt. */
316 .word gdt_end-cpu_gdt_table
/* 32bit-era GDT entries.  In descriptor 0x00af9a000000ffff the 0xa
 * flags nibble sets L=1 (64bit code segment) and the 0x9a access byte
 * is present, DPL0, execute/read code. */
327 .quad 0x0000000000000000 /* This one is magic */
328 .quad 0x0000000000000000 /* unused */
329 .quad 0x00af9a000000ffff /* __KERNEL_CS */
332 /* We need valid kernel segments for data and code in long mode too
333 * IRET will check the segment types kkeil 2000/10/28
334 * Also sysret mandates a special GDT layout
337 .align L1_CACHE_BYTES
339 /* The TLS descriptors are currently at a different place compared to i386.
340 Hopefully nobody expects them at a fixed place (Wine?) */
/* Per-CPU GDT proper.  Entry order is ABI: the selectors in
 * asm/segment.h index directly into this table, and sysret requires the
 * user segments laid out exactly like this. */
343 .quad 0x0000000000000000 /* NULL descriptor */
344 .quad 0x008f9a000000ffff /* __KERNEL_COMPAT32_CS */
345 .quad 0x00af9a000000ffff /* __KERNEL_CS */
346 .quad 0x00cf92000000ffff /* __KERNEL_DS */
347 .quad 0x00cffe000000ffff /* __USER32_CS */
348 .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
349 .quad 0x00affa000000ffff /* __USER_CS */
350 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
353 .quad 0,0,0 /* three TLS descriptors */
354 .quad 0 /* unused now */
355 .quad 0x00009a000000ffff /* __KERNEL16_CS - 16bit PM for S3 wakeup. */
356 /* base must be patched for real base address. */
358 /* asm/segment.h:GDT_ENTRIES must match this */
359 /* This should be a multiple of the cache line size */
360 /* GDTs of other CPUs: */
/* Reserve room so each CPU gets its own GDT_SIZE slot after the boot
 * CPU's table. */
361 .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
363 .align L1_CACHE_BYTES