1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <cpu/x86/cpu_info.S.inc>
4 #include <cpu/x86/post_code.h>
5 #include <arch/ram_segs.h>
7 /* Place the stack in the bss section. It's not necessary to define it in
8 * the linker script. */
9 .section .bss, "aw", @nobits
/* NOTE(review): this chunk is an elided listing — the leading number on each
 * line is the original file line number, and several lines are missing from
 * view. The _stack and _estack labels (likely original lines 17 and 19)
 * bounding this reservation are among the elided lines — confirm against
 * the full file. */
14 /* Stack alignment is not enforced with rmodule loader, reserve one
15 * extra CPU such that alignment can be enforced on entry. */
16 .align CONFIG_STACK_SIZE
/* One CONFIG_STACK_SIZE-byte stack per CPU, plus one spare stack so the
 * entry code can round %esp down to a stack-size boundary (the
 * andl $(~(CONFIG_STACK_SIZE-1)) in the entry path relies on this slack). */
18 .space (CONFIG_MAX_CPUS+1)*CONFIG_STACK_SIZE
/* Total reserved stack size, derived from the bounding labels. */
20 .set _stack_size, _estack - _stack
22 .section ".text._start", "ax", @progbits
/* NOTE(review): elided listing — the _start label and many interior lines
 * (original numbers 23-35, 37, 39-41, 43, 45, 47-49, ...) are missing from
 * this view. Comments below describe only the instructions that are
 * visible; control flow between them cannot be fully confirmed here. */
/* Far jump reloads %cs with the RAM code segment selector from the GDT
 * defined later in this file. */
36 ljmp $RAM_CODE_SEG, $1f
38 1: movl $RAM_DATA_SEG, %eax
/* Null selector for %fs/%gs; %gs is noted as the per-CPU cpu_info base. */
42 xor %eax, %eax /* zero out the gs and fs segment index */
44 movl %eax, %gs /* Will be used for cpu_info */
46 mov $RAM_CODE_SEG64, %ecx
50 post_code(POST_ENTRY_C_START) /* post 13 */
/* Store the cbmem top pointer — a 64-bit store (movabs %rax) and a 32-bit
 * store (movl %eax) are both visible; presumably selected by elided
 * preprocessor conditionals for the x86_64 vs i386 builds — TODO confirm. */
56 movabs %rax, _cbmem_top_ptr
59 /* The return argument is at 0(%esp), the calling argument at 4(%esp) */
61 movl %eax, _cbmem_top_ptr
65 /** poison the stack. Code should not count on the
66 * stack being full of zeros. This stack poisoning
67 * recently uncovered a bug in the broadcast SIPI
72 shrl $2, %ecx /* it is 32 bit aligned, right? */
/* Recognizable poison value; the fill loop itself is elided from view. */
73 movl $0xDEADBEEF, %eax
77 /* Set new stack with enforced alignment. */
/* Round %esp down to a CONFIG_STACK_SIZE boundary (assumes the size is a
 * power of two; the spare stack reserved in .bss absorbs the rounding). */
79 andl $(~(CONFIG_STACK_SIZE-1)), %esp
83 #if CONFIG(CPU_INFO_V2)
84 /* Allocate the per_cpu_segment_data on the stack */
85 push_per_cpu_segment_data
88 * Update the BSP's per_cpu_segment_descriptor to point to the
89 * per_cpu_segment_data that was allocated on the stack.
91 set_segment_descriptor_base $per_cpu_segment_descriptors, %esp
93 mov $per_cpu_segment_selector, %eax
99 * Now we are finished. Memory is up, data is copied and
100 * bss is cleared. Now we call the main routine and
101 * let it do the rest.
103 post_code(POST_PRE_HARDWAREMAIN) /* post 6e */
/* Keep the stack 16-byte aligned before calling into C code. */
105 andl $0xFFFFFFF0, %esp
107 #if CONFIG(ASAN_IN_RAMSTAGE)
113 call gdb_stub_breakpoint
/* Execution should never reach past main; flag dead code if it does. */
118 post_code(POST_DEAD_CODE) /* post ee */
124 .globl gdb_stub_breakpoint
/* gdb_stub_breakpoint: builds a synthetic exception frame for vector 32
 * (per the inline comments, a user-defined vector used as a software
 * breakpoint). Two variants are visible — 64-bit (pop/push %rax) and
 * 32-bit (popl/pushl %eax) — the label line and the #if/#else selecting
 * between them, plus the jump into the common interrupt entry, are elided
 * from this listing — TODO confirm against the full file. */
127 pop %rax /* Return address */
130 push %rax /* Return address */
131 push $0 /* No error code */
132 push $32 /* vector 32 is user defined */
/* 32-bit variant of the same frame construction. */
134 popl %eax /* Return address */
137 pushl %eax /* Return address */
138 pushl $0 /* No error code */
139 pushl $32 /* vector 32 is user defined */
145 .global per_cpu_segment_descriptors, per_cpu_segment_selector
/* GDT pointer for lgdt: 16-bit limit followed by the table base. The
 * gdtptr label itself and intervening lines are elided from this listing. */
148 .word gdt_end - gdt - 1
152 .long gdt /* we know the offset */
157 /* This is the gdt for GCC part of coreboot.
158 * It is different from the gdt in ASM part of coreboot
159 * which is defined in gdt_init.S
161 * When the machine is initially started, we use a very simple
162 * gdt from ROM (that in gdt_init.S) which only contains those
163 * entries we need for protected mode.
165 * When we're executing code from RAM, we want to do more complex
166 * stuff, like initializing PCI option ROMs in real mode, or doing
167 * a resume from a suspend to RAM.
/* NOTE(review): many descriptor entries below are missing their
 * .word limit/base lines (elided); only the attribute bytes remain for
 * some selectors. Selector offsets are documented in the inline
 * "selgdt" comments. */
170 /* selgdt 0, unused */
171 .word 0x0000, 0x0000 /* dummy */
172 .byte 0x00, 0x00, 0x00, 0x00
174 /* selgdt 8, unused */
175 .word 0x0000, 0x0000 /* dummy */
176 .byte 0x00, 0x00, 0x00, 0x00
178 /* selgdt 0x10, flat code segment */
180 .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, So we get 4Gbytes for
184 /* selgdt 0x18, flat data segment */
/* Two attribute-byte variants visible (0x92 vs 0x93: accessed bit differs);
 * the conditional selecting between them is elided — TODO confirm. */
187 .byte 0x00, 0x92, 0xcf, 0x00
189 .byte 0x00, 0x93, 0xcf, 0x00
192 /* selgdt 0x20, unused */
193 .word 0x0000, 0x0000 /* dummy */
194 .byte 0x00, 0x00, 0x00, 0x00
196 /* The next two entries are used for executing VGA option ROMs */
198 /* selgdt 0x28 16 bit 64k code at 0x00000000 */
202 /* selgdt 0x30 16 bit 64k data at 0x00000000 */
206 /* The next two entries are used for ACPI S3 RESUME */
208 /* selgdt 0x38, flat data segment 16 bit */
209 .word 0x0000, 0x0000 /* dummy */
210 .byte 0x00, 0x93, 0x8f, 0x00 /* G=1 and 0x0f, So we get 4Gbytes for
214 /* selgdt 0x40, flat code segment 16 bit */
216 .byte 0x00, 0x9b, 0x8f, 0x00 /* G=1 and 0x0f, So we get 4Gbytes for
221 /* selgdt 0x48, flat x64 code segment */
/* 0xaf attribute byte: L=1 (64-bit code segment). */
223 .byte 0x00, 0x9b, 0xaf, 0x00
225 #if CONFIG(CPU_INFO_V2)
/* One data-segment descriptor per CPU; bases are patched at runtime by
 * set_segment_descriptor_base in the entry path. */
226 per_cpu_segment_descriptors:
227 .rept CONFIG_MAX_CPUS
228 /* flat data segment */
231 .byte 0x00, 0x92, 0xcf, 0x00
233 .byte 0x00, 0x93, 0xcf, 0x00
236 #endif /* CPU_INFO_V2 */
239 #if CONFIG(CPU_INFO_V2)
240 /* Segment selector pointing to the first per_cpu_segment_descriptor. */
241 per_cpu_segment_selector:
242 .long per_cpu_segment_descriptors - gdt
243 #endif /* CPU_INFO_V2 */
245 .section ".text._start", "ax", @progbits
/* SetCodeSelector fragment (the function label and several frame-building
 * pushes are elided from this listing): switches %cs to the code segment
 * selector passed in %cx by constructing an iret frame — per the inline
 * comments, iret pops cs:rip, flags, then ss:rsp — and resuming at
 * setCodeSelectorLongJump under the new %cs. */
248 # save rsp because iret will align it to a 16 byte boundary
251 # use iret to jump to a 64-bit offset in a new code segment
252 # iret will pop cs:rip, flags, then ss:rsp
253 mov %ss, %ax # need to push ss..
254 push %rax # push ss instruction not valid in x64 mode,
258 push %rcx # cx is code segment selector from caller
/* movabs: full 64-bit absolute address of the continuation label. */
259 movabs $setCodeSelectorLongJump, %rax
262 # the iret will continue at next instruction, with the new cs value
266 setCodeSelectorLongJump:
267 # restore rsp, it might not have been 16-byte aligned on entry