/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif
/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
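
/*
 * For reference (a sketch, assuming the default 4-level layout with
 * __START_KERNEL_map = 0xffffffff80000000 and
 * __PAGE_OFFSET = 0xffff880000000000):
 *	L4_START_KERNEL = pgd_index(0xffffffff80000000) = 511
 *	L3_START_KERNEL = pud_index(0xffffffff80000000) = 510
 *	L4_PAGE_OFFSET  = pgd_index(0xffff880000000000) = 272
 * i.e. the kernel image lives in the last PGD slot and the second-to-last
 * PUD slot, which is why the 510/511 entries show up in the fixups below.
 */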
	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	/* Sanitize CPU configuration */
	call verify_cpu
	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
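
	/*
	 * A worked example (illustrative numbers, not guaranteed):
	 * $_text - __START_KERNEL_map is the physical address the kernel was
	 * linked to run at, typically 0x1000000. If the bootloader actually
	 * placed us at physical 0x4000000, leaq _text(%rip) yields 0x4000000,
	 * so %rbp = 0x4000000 - 0x1000000 = 0x3000000 -- the delta that must
	 * be added to every physical address baked into the page tables.
	 */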
	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address
	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
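
	/*
	 * The fixed-up slots mirror the static tables at the end of this
	 * file: early_level4_pgt[511] points at level3_kernel_pgt,
	 * level3_kernel_pgt[510]/[511] point at level2_kernel_pgt and
	 * level2_fixmap_pgt, and level2_fixmap_pgt[506] points at
	 * level1_fixmap_pgt. Those entries hold link-time physical
	 * addresses, so each one needs the %rbp delta applied exactly once.
	 */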
	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
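
	/*
	 * In other words (a summary, not authoritative): the two pages
	 * directly after early_level4_pgt -- the start of early_dynamic_pgts
	 * -- are pressed into service as a PUD page and a PMD page. Two
	 * adjacent PGD and PUD slots are filled so the mapping still works
	 * if _text.._end straddles a 1G or 512G boundary, and the loop above
	 * covers [_text, _end] with non-global 2M pages.
	 */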
	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
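
	/*
	 * Note: on this path %rax was just set to init_level4_pgt, but the
	 * boot CPU arrives at the 1: label above from startup_64 with %rax
	 * already pointing at early_level4_pgt, so this single %cr3 load
	 * serves both entry paths.
	 */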
	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
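
	/*
	 * Background: CPUID leaf 0x80000001 returns the extended feature
	 * flags in %edx; bit 20 is NX. When present, EFER.NX is enabled and
	 * _PAGE_BIT_NX is added to early_pmd_flags, so mappings built later
	 * from those flags come out non-executable.
	 */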
	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0
	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running at. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)
	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * in this function after booting via KEXEC, etc.
	 */
	movl %eax,%fs
	movl %eax,%gs
	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union. If the stack protector canary is enabled, it is
	 * located at %gs:40. Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	movq	%rsi, %rdi
	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump. In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16.
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
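
	/*
	 * How the lretq works here: it pops %rip (the initial_code target)
	 * and then %cs (__KERNEL_CS) from the frame built above, giving a
	 * far transfer with a full 64bit target on both vendors. The
	 * remaining $0 on the stack then looks like a NULL return address
	 * to any unwinder inspecting the new context.
	 */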
#include "verify_cpu.S"
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif
	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
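
/*
 * A sketch of what the .rept above emits, assuming EARLY_IDT_HANDLER_SIZE
 * is 9 bytes (2-byte push, 2-byte push, 5-byte jmp). For a vector without
 * a hardware error code, e.g. vector 2:
 *	pushq $0			# 2 bytes, dummy error code
 *	pushq $2			# 2 bytes, vector number
 *	jmp early_idt_handler_common	# 5 bytes
 * For a vector with an error code, e.g. vector 14, the dummy push is
 * skipped and the .fill pads the 7-byte stub with 0xcc so every entry
 * occupies the same EARLY_IDT_HANDLER_SIZE slot.
 */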
early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je .Lis_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)
	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */

1:	hlt
	jmp 1b
20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
.Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)
	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0
#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)
/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
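
/*
 * For example (illustrative expansion, assuming PMD_SHIFT = 21, i.e. 2M
 * pages), PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2) assembles to:
 *	.quad 0x0000000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x0200000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 * i.e. two consecutive 2M large-page pmd entries starting at physical 0.
 */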
	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
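
/*
 * Note: the identity-mapping code in startup_64 above consumes the first
 * two of these dynamic pages (one as a PUD page, one as a PMD page) by
 * writing through early_level4_pgt + 4096; the rest are handed out by
 * early_make_pgtable() when early page faults hit.
 */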
	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
#endif
NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
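
/*
 * Sanity check on the arithmetic (assuming the usual KERNEL_IMAGE_SIZE of
 * 512M and PMD_SIZE of 2M): KERNEL_IMAGE_SIZE/PMD_SIZE = 256, so this PMDS
 * invocation emits 256 of the 512 entries in this page, mapping
 * [0, 512M) of physical memory at __START_KERNEL_map.
 */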
NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0
#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)
ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000
#include "../../x86/xen/xen-head.S"
	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE