/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
*/

#define __KERNEL_NOLIBBASE__

#include <aros/types/spinlock_s.h>
#include <aros/atomic.h>

#include <exec/execbase.h>
#include <exec/memory.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_base.h"
#include "kernel_debug.h"
#include "kernel_globals.h"
#include "kernel_intern.h"
#include "kernel_syscall.h"
#include "kernel_ipi.h"

extern const void *_binary_smpbootstrap_start;
extern const void *_binary_smpbootstrap_size;
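
/*
 * These symbols come from embedding the SMP bootstrap binary into the
 * kickstart (the usual objcopy/linker convention): the address of
 * _binary_smpbootstrap_start is the first byte of the blob, while the
 * *address* of _binary_smpbootstrap_size encodes its length, which is why
 * the code below casts &_binary_smpbootstrap_size to an integer instead of
 * dereferencing it.
 */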

extern APTR PlatformAllocGDT(struct KernelBase *, apicid_t);
extern APTR PlatformAllocTLS(struct KernelBase *, apicid_t);
extern APTR PlatformAllocIDT(struct KernelBase *, apicid_t);

#if defined(__AROSEXEC_SMP__)
extern void cpu_PrepareExec(struct ExecBase *);
extern struct Task *cpu_InitBootStrap(apicid_t);
extern void cpu_BootStrap(struct Task *);
#endif
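
/*
 * smp_Wake() stores the per-core startup arguments in the bootstrap data
 * area (Arg1 = stack base, Arg2 = ready spinlock, Arg3 = KernelBase,
 * Arg4 = logical CPU number); the bootstrap code is expected to hand them
 * to smp_Entry() in that order.
 */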
static void smp_Entry(IPTR stackBase, spinlock_t *apicReadyLock, struct KernelBase *KernelBase, apicid_t apicCPUNo)
{
    /*
     * This is the entry point for secondary cores.
     * KernelBase is already set up by the primary CPU, so we can use it.
     */
    struct APICData *apicData = KernelBase->kb_PlatformData->kb_APIC;
    __unused struct CPUData *apicCPU;
    IPTR _APICBase;
    apicid_t _APICID;
#if defined(__AROSEXEC_SMP__)
    struct Task *apicBSTask;
#endif

    /* Enable fxsave/fxrstor */
    wrcr(cr4, rdcr(cr4) | _CR4_OSFXSR | _CR4_OSXMMEXCPT);
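    /*
     * CR4 is per-core state, so this must be repeated on every secondary:
     * OSFXSR enables FXSAVE/FXRSTOR (and the SSE instructions), while
     * OSXMMEXCPT routes unmasked SIMD floating-point errors to the #XM
     * exception instead of #UD.
     */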

    apicCPU = &apicData->cores[apicCPUNo];

    /* Find out ourselves */
    _APICBase = core_APIC_GetBase();
    _APICID   = core_APIC_GetID(_APICBase);
68 bug("[Kernel:SMP] %s[%03u]: APIC ID %03u starting up...\n", __func__
, apicCPUNo
, _APICID
);
69 if (apicCPU
->cpu_LocalID
!= _APICID
)
71 bug("[Kernel:SMP] %s[%03u]: Warning! expected ID %03u\n", __func__
, apicCPUNo
, apicCPU
->cpu_LocalID
);
73 bug("[Kernel:SMP] %s[%03u]: APIC base @ 0x%p\n", __func__
, apicCPUNo
, _APICBase
);
75 bug("[Kernel:SMP] %s[%03u]: KernelBootPrivate 0x%p\n", __func__
, apicCPUNo
, __KernBootPrivate
);
77 bug("[Kernel:SMP] %s[%03u]: StackBase 0x%p\n", __func__
, apicCPUNo
, stackBase
);
78 bug("[Kernel:SMP] %s[%03u]: Ready Lock 0x%p\n", __func__
, apicCPUNo
, apicReadyLock
);

    apicCPUNo = core_APIC_GetNumber(apicData);

    D(bug("[Kernel:SMP] %s[%03u]: APIC CPU Data @ 0x%p\n", __func__, apicCPUNo, apicCPU));

    /* Set up GDT and LDT for our core */
    D(bug("[Kernel:SMP] %s[%03u]: GDT @ 0x%p, TLS @ 0x%p\n", __func__, apicCPUNo, apicCPU->cpu_GDT, apicCPU->cpu_TLS));

    core_SetupGDT(__KernBootPrivate, apicCPUNo, apicCPU->cpu_GDT, apicCPU->cpu_TLS, __KernBootPrivate->TSS);
    core_CPUSetup(apicCPUNo, apicCPU->cpu_GDT, stackBase);
95 D(bug("[Kernel:SMP] %s[%03u]: Core IDT @ 0x%p\n", __func__
, apicCPUNo
, apicCPU
->cpu_IDT
));
97 core_SetupIDT(apicCPUNo
, apicCPU
->cpu_IDT
);

    if (!core_SetIDTGate((struct int_gate_64bit *)apicCPU->cpu_IDT,
                         APIC_CPU_EXCEPT_TO_VECTOR(APIC_EXCEPT_SYSCALL),
                         (uintptr_t)IntrDefaultGates[APIC_CPU_EXCEPT_TO_VECTOR(APIC_EXCEPT_SYSCALL)],
                         TRUE))
    {
        krnPanic(NULL, "Failed to set APIC Syscall Vector\n"
                       "Vector #$%02X\n",
                       APIC_CPU_EXCEPT_TO_VECTOR(APIC_EXCEPT_SYSCALL));
    }
105 D(bug("[Kernel:SMP] %s[%03u]: APIC Syscall Vector configured\n", __func__
, apicCPUNo
));
107 D(bug("[Kernel:SMP] %s[%03u]: Preparing MMU...\n", __func__
, apicCPUNo
));
108 core_LoadMMU(&__KernBootPrivate
->MMU
);

#if defined(__AROSEXEC_SMP__)
    D(bug("[Kernel:SMP] %s[%03u]: SysBase @ 0x%p\n", __func__, apicCPUNo, SysBase));

    TLS_SET(SysBase, SysBase);
    TLS_SET(KernelBase, KernelBase);

    if ((apicBSTask = cpu_InitBootStrap(apicCPUNo)) != NULL)
    {
        apicBSTask->tc_SPLower = NULL;
        apicBSTask->tc_SPUpper = (APTR)~0;

        cpu_BootStrap(apicBSTask);
    }
#else
    bug("[Kernel:SMP] APIC #%u of %u Going IDLE (Halting)...\n", apicCPUNo + 1, apicData->apic_count);
#endif

    /* Signal the bootstrap core that we are running */
    KrnSpinUnLock((spinlock_t *)apicReadyLock);
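    /*
     * The waking core took this lock in write mode before starting us and
     * spins on a read try-lock; releasing it here both reports that this
     * core is alive and tells smp_Wake() that the bootstrap argument area
     * may be reused for the next CPU.
     */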

#if defined(__AROSEXEC_SMP__)
    D(bug("[Kernel:SMP] %s[%03u]: Starting up Scheduler...\n", __func__, apicCPUNo);)

    /* clean up now we are done */

    bug("[Kernel:SMP] APIC #%u Failed to bootstrap (Halting)...\n", apicCPUNo + 1);
    while (1) asm volatile("cli; hlt");
#else
    while (1) asm volatile("hlt");
#endif
}

static int smp_Setup(struct KernelBase *KernelBase)
{
    struct PlatformData *pdata = KernelBase->kb_PlatformData;
    unsigned long bslen = (unsigned long)&_binary_smpbootstrap_size;
    struct MemHeader *lowmem;
    APTR smpboot = NULL;
    struct SMPBootstrap *bs;

    D(bug("[Kernel:SMP] %s()\n", __func__));

    /* Find a suitable memheader to allocate the bootstrap from .. */
    ForeachNode(&SysBase->MemList, lowmem)
    {
        /* Is it in lowmem? */
        if ((IPTR)lowmem->mh_Lower < 0x000100000)
        {
            D(bug("[Kernel:SMP] Trying memheader @ 0x%p\n", lowmem));
            D(bug("[Kernel:SMP] * 0x%p - 0x%p (%s pri %d)\n", lowmem->mh_Lower, lowmem->mh_Upper, lowmem->mh_Node.ln_Name, lowmem->mh_Node.ln_Pri));

            /*
             * Attempt to allocate space for the SMP bootstrap code.
             * NB: Its address must be page-aligned!
             * NB2: Every CPU starts up in real mode
             */
            smpboot = Allocate(lowmem, bslen + PAGE_SIZE - 1);
            if (smpboot)
                break;
        }
    }

    if (!smpboot)
    {
        bug("[Kernel:SMP] Failed to allocate %lu bytes for SMP bootstrap\n", bslen + PAGE_SIZE - 1);
        return 0;
    }

    /* Install SMP bootstrap code */
    bs = (APTR)AROS_ROUNDUP2((IPTR)smpboot, PAGE_SIZE);
    CopyMem(&_binary_smpbootstrap_start, bs, (unsigned long)&_binary_smpbootstrap_size);
    pdata->kb_APIC_TrampolineBase = bs;

    D(bug("[Kernel:SMP] Copied APIC bootstrap code to 0x%p\n", bs));

    /*
     * Store constant arguments in the bootstrap's data area.
     * WARNING!!! The bootstrap code assumes PML4 is placed in 32-bit memory,
     * and there seems to be no easy way to fix this.
     * If the AROS kickstart is ever loaded into high memory, we would need
     * to take special care of it.
     */
    bs->Arg3 = (IPTR)KernelBase;

    //TODO: Allocate each core's own MMU structures and copy the necessary data to them
    bs->PML4 = __KernBootPrivate->MMU.mmu_PML4;

    return 1;
}

/*
 * Here we wake up our secondary cores.
 */
static int smp_Wake(struct KernelBase *KernelBase)
{
    struct PlatformData *pdata = KernelBase->kb_PlatformData;
    struct SMPBootstrap *bs = pdata->kb_APIC_TrampolineBase;
    struct APICData *apicData = pdata->kb_APIC;
    APTR _APICStackBase;
    IPTR wakeresult = -1;
    apicid_t cpuNo;
#if defined(__AROSEXEC_SMP__)
    tls_t *apicTLS;
#endif
    spinlock_t *apicReadyLocks;

    apicReadyLocks = AllocMem(sizeof(spinlock_t) * apicData->apic_count, MEMF_CLEAR|MEMF_ANY);
    D(bug("[Kernel:SMP] %d Ready spinlocks starting at 0x%p\n", apicData->apic_count, apicReadyLocks));

    /* Core number 0 is our bootstrap core, so we start from No 1 */
    for (cpuNo = 1; cpuNo < apicData->apic_count; cpuNo++)
    {
        struct APICCPUWake_Data apicWake =
        {
            bs,
            apicData->lapicBase,
            apicData->cores[cpuNo].cpu_LocalID
        };
236 D(bug("[Kernel:SMP] Launching CPU #%u (ID %03u)\n", cpuNo
+ 1, apicData
->cores
[cpuNo
].cpu_LocalID
));
238 KrnSpinInit(&apicReadyLocks
[cpuNo
]);

        apicData->cores[cpuNo].cpu_GDT = PlatformAllocGDT(KernelBase, apicData->cores[cpuNo].cpu_LocalID);
        apicData->cores[cpuNo].cpu_TLS = PlatformAllocTLS(KernelBase, apicData->cores[cpuNo].cpu_LocalID);
#if defined(__AROSEXEC_SMP__)
        apicTLS = apicData->cores[cpuNo].cpu_TLS;
        apicTLS->ScheduleData = AllocMem(sizeof(struct X86SchedulerPrivate), MEMF_PUBLIC);
        core_InitScheduleData(apicTLS->ScheduleData);
        D(bug("[Kernel:SMP] Scheduling Data @ 0x%p\n", apicTLS->ScheduleData));
#endif
        apicData->cores[cpuNo].cpu_IDT = PlatformAllocIDT(KernelBase, apicData->cores[cpuNo].cpu_LocalID);

        /*
         * First we need to allocate a stack for our CPU.
         * We allocate the same three stacks as in core_CPUSetup().
         */
        _APICStackBase = AllocMem(STACK_SIZE * 3, MEMF_CLEAR);
        D(bug("[Kernel:SMP] Allocated STACK for APIC ID %03u @ 0x%p ..\n", apicData->cores[cpuNo].cpu_LocalID, _APICStackBase));
        if (!_APICStackBase)
            return 0;

        /* Pass some vital information to the core we are about to wake */
        bs->Arg1 = (IPTR)_APICStackBase;
        bs->Arg2 = (IPTR)&apicReadyLocks[cpuNo];
        // Arg3 = KernelBase - already set by smp_Setup()
        bs->Arg4 = (IPTR)cpuNo;
        bs->SP   = _APICStackBase + STACK_SIZE;
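        /*
         * The initial SP lands at the top of the first of the three
         * STACK_SIZE regions (x86 stacks grow downwards); the remaining two
         * are presumably reserved for core_CPUSetup() on the target core.
         */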

        /* Lock the spinlock before launching the core */
        KrnSpinLock(&apicReadyLocks[cpuNo], NULL, SPINLOCK_MODE_WRITE);

        /* Start IPI sequence */
        wakeresult = krnSysCallCPUWake(&apicWake);

        /* wakeresult != 0 means error */
        if (!wakeresult)
        {
            UQUAD current, start = RDTSC();
            /*
             * Before we proceed we need to make sure that the core has picked up
             * its stack and we can reload the bootstrap argument area with another one.
             */
            DWAKE(bug("[Kernel:SMP] Waiting for CPU #%u to initialise .. ", cpuNo + 1));
            while (!KrnSpinTryLock(&apicReadyLocks[cpuNo], SPINLOCK_MODE_READ))
            {
                asm volatile("pause");
                current = RDTSC();
                /* Bail out if the core takes too long to come up; the bound
                   used here is an assumed placeholder value */
                if (((current - start) / apicData->cores[0].cpu_TSCFreq) > 1000)
                {
                    wakeresult = -1;
                    break;
                }
            }
            if (wakeresult != -1)
            {
                KrnSpinUnLock(&apicReadyLocks[cpuNo]);
                D(bug("[Kernel:SMP] CPU #%u started up\n", cpuNo + 1));
            }
        }

        D(if (wakeresult) { bug("[Kernel:SMP] core_APIC_Wake() failed, status 0x%p\n", wakeresult); } )
    }
307 D(bug("[Kernel:SMP] Done\n"));

int smp_Initialize(void)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct PlatformData *pdata = KernelBase->kb_PlatformData;

    if (pdata->kb_APIC && (pdata->kb_APIC->apic_count > 1))
    {
        int number_of_ipi_messages = 0;
        struct IPIHook *hooks;
        int i;

#if defined(__AROSEXEC_SMP__)
        cpu_PrepareExec(SysBase);
#endif
327 D(bug("[Kernel:SMP] %s: Initializing Lists for IPI messages ...\n", __func__
));
328 NEWLIST(&pdata
->kb_FreeIPIHooks
);
329 NEWLIST(&pdata
->kb_BusyIPIHooks
);
330 KrnSpinInit(&pdata
->kb_FreeIPIHooksLock
);
331 KrnSpinInit(&pdata
->kb_BusyIPIHooksLock
);

        number_of_ipi_messages = pdata->kb_APIC->apic_count * 10;
        D(bug("[Kernel:SMP] %s: Allocating %d IPI CALL_HOOK messages ...\n", __func__, number_of_ipi_messages));
        hooks = AllocMem((sizeof(struct IPIHook) * number_of_ipi_messages + 127), MEMF_PUBLIC | MEMF_CLEAR);
        hooks = (struct IPIHook *)(((IPTR)hooks + 127) & ~127);
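        /*
         * The extra 127 bytes and the mask round the hook array up to a
         * 128-byte boundary, presumably so it starts cache-line aligned;
         * a NULL result from AllocMem() remains NULL after this adjustment.
         */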

        if (hooks)
        {
            for (i = 0; i < number_of_ipi_messages; i++)
            {
                hooks[i].ih_CPUDone = KrnAllocCPUMask();
                hooks[i].ih_CPURequested = KrnAllocCPUMask();
                KrnSpinInit(&hooks[i].ih_Lock);

                ADDHEAD(&pdata->kb_FreeIPIHooks, &hooks[i]);
            }
        }
        else
        {
            bug("[Kernel:SMP] %s: Failed to get IPI slots!\n", __func__);
        }

        if (!smp_Setup(KernelBase))
        {
            D(bug("[Kernel:SMP] Failed to prepare the environment!\n"));

            pdata->kb_APIC->apic_count = 1; /* We have only one working CPU */
            return 0;
        }

        return smp_Wake(KernelBase);
    }

    /* This is not an SMP machine, but it's okay */
    return 1;
}