/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2011 Joyent, Inc. All rights reserved.
 */

/*
 * Welcome to the world of the "real mode platter".
 * See also startup.c, mpcore.s and apic.c for related routines.
 */
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/mach_mmu.h>
#include <sys/promif.h>
#include <sys/cpu_event.h>
#include <sys/sunndi.h>
#include <sys/fs/dv_node.h>
#include <vm/hat_i86.h>
extern cpuset_t cpu_ready_set;

extern int mp_start_cpu_common(cpu_t *cp, boolean_t boot);
extern void real_mode_start_cpu(void);
extern void real_mode_start_cpu_end(void);
extern void real_mode_stop_cpu_stage1(void);
extern void real_mode_stop_cpu_stage1_end(void);
extern void real_mode_stop_cpu_stage2(void);
extern void real_mode_stop_cpu_stage2_end(void);

void rmp_gdt_init(rm_platter_t *);
/*
 * Fill up the real mode platter to make it easy for real mode code to
 * kick it off. This area should really be one passed by boot to kernel
 * and guaranteed to be below 1MB and aligned to 16 bytes. Should also
 * have identical physical and virtual address in paged mode.
 */
static ushort_t *warm_reset_vector = NULL;
int
mach_cpucontext_init(void)
{
	ushort_t *vec;
	ulong_t addr;
	struct rm_platter *rm = (struct rm_platter *)rm_platter_va;
	if (!(vec = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
	    sizeof (vec), PROT_READ | PROT_WRITE)))
		return (-1);
	/*
	 * Set up the secondary CPU BIOS boot-up vector:
	 * write the page offset to 0x467 and the page frame number to 0x469.
	 */
	addr = (ulong_t)((caddr_t)rm->rm_code - (caddr_t)rm) + rm_platter_pa;
	vec[0] = (ushort_t)(addr & PAGEOFFSET);
	vec[1] = (ushort_t)((addr & (0xfffff & PAGEMASK)) >> 4);
	warm_reset_vector = vec;
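	/*
	 * Note: the warm reset vector is consumed as a real-mode far pointer,
	 * which is why the platter address is split into a page offset and a
	 * 16-byte-granular segment (addr >> 4); the 0xfffff mask keeps the
	 * segment within the first megabyte addressable from real mode.
	 */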
	/* Map real mode platter into kas so kernel can access it. */
	hat_devload(kas.a_hat,
	    (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    btop(rm_platter_pa), PROT_READ | PROT_WRITE | PROT_EXEC,
	    HAT_LOAD_NOCONSIST);
	/* Copy CPU startup code to rm_platter if we are still booting. */
	if (!plat_dr_enabled()) {
		ASSERT((size_t)real_mode_start_cpu_end -
		    (size_t)real_mode_start_cpu <= RM_PLATTER_CODE_SIZE);
		bcopy((caddr_t)real_mode_start_cpu, (caddr_t)rm->rm_code,
		    (size_t)real_mode_start_cpu_end -
		    (size_t)real_mode_start_cpu);
	}

	return (0);
}
void
mach_cpucontext_fini(void)
{
	if (warm_reset_vector)
		psm_unmap_phys((caddr_t)warm_reset_vector,
		    sizeof (warm_reset_vector));

	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    HAT_UNLOAD);
}
extern void *long_mode_64(void);
void
rmp_gdt_init(rm_platter_t *rm)
{
	/* Use the kas address space for the CPU startup thread. */
	if (MAKECR3(kas.a_hat->hat_htable->ht_pfn) > 0xffffffffUL)
		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
		    "located above 4G in physical memory (@ 0x%lx)",
		    MAKECR3(kas.a_hat->hat_htable->ht_pfn));
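	/*
	 * The 4G check above is needed because the starting CPU loads
	 * rm_pdbr into %cr3 while still running in 32-bit protected mode,
	 * before long mode is enabled, so the kernel's top level page table
	 * must have a physical address that fits in 32 bits.
	 */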
	/*
	 * Set up pseudo-descriptors for temporary GDT and IDT for use ONLY
	 * by code in real_mode_start_cpu():
	 *
	 * GDT[0]: NULL selector
	 * GDT[1]: 64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
	 *
	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
	 * a course of action as any other, though it may cause the entire
	 * platform to reset in some cases...
	 */
	rm->rm_temp_gdt[0] = 0;
	rm->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
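	/*
	 * Decoding the descriptor above: the access byte 0x98 gives
	 * Present = 1, DPL = 0, S = 1 and an execute-only code type, and the
	 * 0x2 flags nibble sets L = 1 (64-bit) with D = 0, as required for a
	 * long mode code segment; base and limit are ignored in long mode.
	 */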
	rm->rm_temp_gdt_lim = (ushort_t)(sizeof (rm->rm_temp_gdt) - 1);
	rm->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)offsetof(rm_platter_t, rm_temp_gdt);
	rm->rm_temp_idt_lim = 0;
	rm->rm_temp_idt_base = 0;
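	/*
	 * Note: long_mode_64 lies within the text copied from
	 * real_mode_start_cpu into rm_code, so its identity-mapped address
	 * below is the platter's physical address plus the symbol's offset
	 * within that copied code.
	 */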
	/*
	 * Since the CPU needs to jump to protected mode using an identity
	 * mapped address, we need to calculate it here.
	 */
	rm->rm_longmode64_addr = rm_platter_pa +
	    (uint32_t)((uintptr_t)long_mode_64 -
	    (uintptr_t)real_mode_start_cpu);
}
void *
mach_cpucontext_alloc_tables(struct cpu *cp)
{
	tss_t *ntss;
	struct cpu_tables *ct;

	/*
	 * Allocate space for stack, tss, gdt and idt. We round the size
	 * allotted for cpu_tables up, so that the TSS is on a unique page.
	 * This is more efficient when running in virtual machines.
	 */
	ct = kmem_zalloc(P2ROUNDUP(sizeof (*ct), PAGESIZE), KM_SLEEP);
	if ((uintptr_t)ct & PAGEOFFSET)
		panic("mach_cpucontext_alloc_tables: cpu%d misaligned tables",
		    cp->cpu_id);

	ntss = cp->cpu_tss = &ct->ct_tss;
#if defined(__amd64)

	/*
	 * 64-bit kernel stack for the #DF (double fault) handler.
	 */
	ntss->tss_ist1 = (uint64_t)&ct->ct_stack[sizeof (ct->ct_stack)];
#elif defined(__i386)
	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
	    (uint32_t)&ct->ct_stack[sizeof (ct->ct_stack)];

	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;

	ntss->tss_eip = (uint32_t)cp->cpu_thread->t_pc;

	ntss->tss_cs = KCS_SEL;
	ntss->tss_ds = ntss->tss_es = KDS_SEL;
	ntss->tss_fs = KFS_SEL;
	ntss->tss_gs = KGS_SEL;

#endif	/* __i386 */
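	/*
	 * On i386, tss_ss0/tss_esp0 are what the CPU loads on a transition
	 * from user to kernel privilege, so they must name a valid kernel
	 * stack; the eip and segment fields only matter for hardware task
	 * switching, which the kernel does not use.
	 */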
	/*
	 * Set I/O bit map offset equal to size of TSS segment limit
	 * for no I/O permission map. This will cause all user I/O
	 * instructions to generate #gp fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);
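	/*
	 * Placing the bitmap base at sizeof (*ntss) puts it beyond the TSS
	 * segment limit, so any I/O permission lookup from user mode faults
	 * before a bitmap is ever consulted.
	 */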
	/*
	 * Install the kernel TSS descriptor for this CPU in its GDT.
	 */
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) - 1, SDT_SYSTSS, SEL_KPL);

	return (ct);
}
void *
mach_cpucontext_xalloc(struct cpu *cp, int optype)
{
	size_t len;
	struct cpu_tables *ct;
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	static int cpu_halt_code_ready;
	if (optype == MACH_CPUCONTEXT_OP_STOP) {
		ASSERT(plat_dr_enabled());
		/*
		 * The WARM_RESET_VECTOR has a limitation that the physical
		 * address written to it must be page-aligned. To work around
		 * this limitation, the CPU stop code has been split into
		 * two stages.
		 * The stage 2 code, which implements the real logic to halt
		 * CPUs, is copied to the rm_cpu_halt_code field in the real
		 * mode platter. The stage 1 code, which simply jumps to the
		 * stage 2 code in the rm_cpu_halt_code field, is copied to
		 * the rm_code field in the real mode platter and it may be
		 * overwritten after the CPU has been stopped.
		 */
		if (!cpu_halt_code_ready) {
			/*
			 * The rm_cpu_halt_code field in the real mode platter
			 * is used by the CPU stop code only. So only copy the
			 * CPU stop stage 2 code into the rm_cpu_halt_code
			 * field on the first call.
			 */
			len = (size_t)real_mode_stop_cpu_stage2_end -
			    (size_t)real_mode_stop_cpu_stage2;
			ASSERT(len <= RM_PLATTER_CPU_HALT_CODE_SIZE);
			bcopy((caddr_t)real_mode_stop_cpu_stage2,
			    (caddr_t)rm->rm_cpu_halt_code, len);
			cpu_halt_code_ready = 1;
		}
		/*
		 * The rm_code field in the real mode platter is shared by
		 * the CPU start, CPU stop, CPR and fast reboot code. So copy
		 * the CPU stop stage 1 code into the rm_code field every time.
		 */
		len = (size_t)real_mode_stop_cpu_stage1_end -
		    (size_t)real_mode_stop_cpu_stage1;
		ASSERT(len <= RM_PLATTER_CODE_SIZE);
		bcopy((caddr_t)real_mode_stop_cpu_stage1,
		    (caddr_t)rm->rm_code, len);
		rm->rm_cpu_halted = 0;

		return (cp->cpu_m.mcpu_mach_ctx_ptr);
	} else if (optype != MACH_CPUCONTEXT_OP_START) {
		return (NULL);
	}
	/*
	 * Only need to allocate tables when starting CPU.
	 * Tables allocated when starting CPU will be reused when stopping CPU.
	 */
	ct = mach_cpucontext_alloc_tables(cp);
	/* Copy CPU startup code to rm_platter for CPU hot-add operations. */
	if (plat_dr_enabled()) {
		bcopy((caddr_t)real_mode_start_cpu, (caddr_t)rm->rm_code,
		    (size_t)real_mode_start_cpu_end -
		    (size_t)real_mode_start_cpu);
	}
	/*
	 * Now copy all that we've set up onto the real mode platter
	 * for the real mode code to digest as part of starting the cpu.
	 */
	rm->rm_idt_base = cp->cpu_idt;
	rm->rm_idt_lim = sizeof (*cp->cpu_idt) * NIDT - 1;
	rm->rm_gdt_base = cp->cpu_gdt;
	rm->rm_gdt_lim = sizeof (*cp->cpu_gdt) * NGDT - 1;
	/*
	 * CPU needs to access kernel address space after powering on.
	 * When hot-adding a CPU at runtime, directly use the top level page
	 * table of kas rather than the return value of getcr3(). getcr3()
	 * returns the current process's top level page table, which may
	 * differ from the one kas uses.
	 */
	rm->rm_pdbr = MAKECR3(kas.a_hat->hat_htable->ht_pfn);
	rm->rm_cpu = cp->cpu_id;
	/*
	 * For hot-adding a CPU at runtime, Machine Check and Performance
	 * Counter should be disabled. They will be enabled on demand after
	 * the CPU powers up successfully.
	 */
	rm->rm_cr4 = getcr4();
	rm->rm_cr4 &= ~(CR4_MCE | CR4_PCE);

	rmp_gdt_init(rm);

	return (ct);
}
void
mach_cpucontext_xfree(struct cpu *cp, void *arg, int err, int optype)
{
	struct cpu_tables *ct = arg;

	ASSERT(&ct->ct_tss == cp->cpu_tss);
	if (optype == MACH_CPUCONTEXT_OP_START) {
		switch (err) {
		case 0:
			/*
			 * Save pointer for reuse when stopping CPU.
			 */
			cp->cpu_m.mcpu_mach_ctx_ptr = arg;
			break;
		case ETIMEDOUT:
			/*
			 * The processor was poked, but failed to start before
			 * we gave up waiting for it. In case it starts later,
			 * don't free anything.
			 */
			cp->cpu_m.mcpu_mach_ctx_ptr = arg;
			break;
		default:
			/*
			 * Some other, passive, error occurred.
			 */
			kmem_free(ct, P2ROUNDUP(sizeof (*ct), PAGESIZE));
			cp->cpu_tss = NULL;
			break;
		}
	} else if (optype == MACH_CPUCONTEXT_OP_STOP) {
		switch (err) {
		case 0:
			/*
			 * Free resources allocated when starting CPU.
			 */
			kmem_free(ct, P2ROUNDUP(sizeof (*ct), PAGESIZE));
			cp->cpu_tss = NULL;
			cp->cpu_m.mcpu_mach_ctx_ptr = NULL;
			break;
		default:
			/*
			 * Don't touch table pointer in case of failure.
			 */
			break;
		}
	}
}
void *
mach_cpucontext_alloc(struct cpu *cp)
{
	return (mach_cpucontext_xalloc(cp, MACH_CPUCONTEXT_OP_START));
}

void
mach_cpucontext_free(struct cpu *cp, void *arg, int err)
{
	mach_cpucontext_xfree(cp, arg, err, MACH_CPUCONTEXT_OP_START);
}
396 * "Enter monitor." Called via cross-call from stop_other_cpus().
399 mach_cpu_halt(char *msg
)
402 prom_printf("%s\n", msg
);
404 /*CONSTANTCONDITION*/
void
mach_cpu_pause(volatile char *safe)
{
	/*
	 * This cpu is now safe.
	 */
	*safe = PAUSE_WAIT;
	membar_enter(); /* make sure stores are flushed */

	/*
	 * Now we wait.  When we are allowed to continue, safe
	 * will be set to PAUSE_IDLE.
	 */
	while (*safe != PAUSE_IDLE)
		SMT_PAUSE();
}
/*
 * Power on the target CPU.
 */
int
mp_cpu_poweron(struct cpu *cp)
{
	int error;
	cpuset_t tempset;
	processorid_t cpuid;

	ASSERT(cp != NULL);
	cpuid = cp->cpu_id;
	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	} else if (cpuid < 0 || cpuid >= max_ncpus) {
		return (EINVAL);
	}
	/*
	 * The current x86 implementation of mp_cpu_configure() and
	 * mp_cpu_poweron() has a limitation: mp_cpu_poweron() may only
	 * be called once after calling mp_cpu_configure() for a specific CPU.
	 * That's because mp_cpu_poweron() will destroy the data structures
	 * created by mp_cpu_configure(). So reject the request if the CPU has
	 * already been powered on once after calling mp_cpu_configure().
	 * This limitation only affects the p_online syscall; the DR driver
	 * won't be affected because it always invokes the public CPU
	 * management interfaces in the predefined order:
	 * cpu_configure()->cpu_poweron()...->cpu_poweroff()->cpu_unconfigure()
	 */
	if (cpuid_checkpass(cp, 4) || cp->cpu_thread == cp->cpu_idle_thread) {
		return (ENOTSUP);
	}
	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ...
		 */
		kmem_reap();
		return (ENOMEM);
	}
	affinity_set(CPU->cpu_id);
	/*
	 * Start the target CPU. No need to call mach_cpucontext_fini()
	 * if mach_cpucontext_init() fails.
	 */
	if ((error = mach_cpucontext_init()) == 0) {
		error = mp_start_cpu_common(cp, B_FALSE);
		mach_cpucontext_fini();
	}
	if (error != 0) {
		affinity_clear();
		return (error);
	}
	/* Wait for the target cpu to reach READY state. */
	tempset = cpu_ready_set;
	while (!CPU_IN_SET(tempset, cpuid)) {
		delay(1);
		tempset = *((volatile cpuset_t *)&cpu_ready_set);
	}
	/* Mark the target CPU as available for mp operation. */
	CPUSET_ATOMIC_ADD(mp_cpus, cpuid);
	/* Free the space allocated to hold the microcode file. */
	ucode_cleanup();

	affinity_clear();

	return (0);
}
#define	MP_CPU_DETACH_MAX_TRIES		5
#define	MP_CPU_DETACH_DELAY		100
static int
mp_cpu_detach_driver(dev_info_t *dip)
{
	int i;
	int rv = EBUSY;
	dev_info_t *pdip;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip != NULL);
	/*
	 * Check if the caller holds pdip busy - this can cause deadlocks in
	 * e_ddi_branch_unconfigure(), which calls devfs_clean().
	 */
	if (DEVI_BUSY_OWNED(pdip)) {
		return (rv);
	}
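	/*
	 * e_ddi_branch_unconfigure() can fail transiently while devfs nodes
	 * are still referenced, so retry a bounded number of times with a
	 * short delay (DELAY() waits in microseconds) between attempts.
	 */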
	for (i = 0; i < MP_CPU_DETACH_MAX_TRIES; i++) {
		if (e_ddi_branch_unconfigure(dip, NULL, 0) == 0) {
			rv = 0;
			break;
		}
		DELAY(MP_CPU_DETACH_DELAY);
	}

	return (rv);
}
/*
 * Power off the target CPU.
 * Note: cpu_lock will be released and then reacquired.
 */
int
mp_cpu_poweroff(struct cpu *cp)
{
	int rv = 0;
	void *ctx;
	cmi_hdl_t hdl;
	dev_info_t *dip = NULL;
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	extern void cpupm_start(cpu_t *);
	extern void cpupm_stop(cpu_t *);
	ASSERT((cp->cpu_flags & CPU_OFFLINE) != 0);
	ASSERT((cp->cpu_flags & CPU_QUIESCED) != 0);
	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	}
	/*
	 * There is no support for powering off cpu0 yet.
	 * There are many pieces of code which have a hard dependency on cpu0.
	 */
	if (cp->cpu_id == 0) {
		return (ENOTSUP);
	}
	if (mach_cpu_get_device_node(cp, &dip) != PSM_SUCCESS) {
		return (ENXIO);
	}
	ASSERT(dip != NULL);
	if (mp_cpu_detach_driver(dip) != 0) {
		return (EBUSY);
	}
	/* Allocate CPU context for stopping. */
	if (mach_cpucontext_init() != 0) {
		rv = ENXIO;
		goto out_attach_driver;
	}
	ctx = mach_cpucontext_xalloc(cp, MACH_CPUCONTEXT_OP_STOP);
	if (ctx == NULL) {
		rv = ENXIO;
		goto out_context_fini;
	}
	cpupm_stop(cp);
	cpu_event_fini_cpu(cp);

	if (cp->cpu_m.mcpu_cmi_hdl != NULL) {
		cmi_fini(cp->cpu_m.mcpu_cmi_hdl);
		cp->cpu_m.mcpu_cmi_hdl = NULL;
	}
	rv = mach_cpu_stop(cp, ctx);
	if (rv != 0)
		goto out_enable_cmi;
	/* Wait until the target CPU has been halted. */
	while (*(volatile ushort_t *)&(rm->rm_cpu_halted) != 0xdead) {
		delay(1);
	}
	rm->rm_cpu_halted = 0xffff;
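	/*
	 * 0xdead is the acknowledgement the stage 2 halt code writes once
	 * the CPU is parked; scribbling over the field afterwards keeps a
	 * stale acknowledgement from satisfying a later stop request.
	 */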
	/* CPU_READY has been cleared by mach_cpu_stop. */
	ASSERT((cp->cpu_flags & CPU_READY) == 0);
	ASSERT((cp->cpu_flags & CPU_RUNNING) == 0);
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
	CPUSET_ATOMIC_DEL(mp_cpus, cp->cpu_id);
	mach_cpucontext_xfree(cp, ctx, 0, MACH_CPUCONTEXT_OP_STOP);
	mach_cpucontext_fini();

	return (0);

out_enable_cmi:
	if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
	    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp))) != NULL) {
		if (is_x86_feature(x86_featureset, X86FSET_MCA))
			cmi_mca_init(hdl);
		cp->cpu_m.mcpu_cmi_hdl = hdl;
	}

	cpu_event_init_cpu(cp);
	cpupm_start(cp);
	mach_cpucontext_xfree(cp, ctx, rv, MACH_CPUCONTEXT_OP_STOP);

out_context_fini:
	mach_cpucontext_fini();

out_attach_driver:
	(void) e_ddi_branch_configure(dip, NULL, 0);
	if (rv != EAGAIN && rv != ETIME) {
		/* The failure is permanent; report an unrecoverable error. */
		rv = ENXIO;
	}

	return (rv);
}
/*
 * Return the vcpu state. Since this could be a virtual environment that we
 * are unaware of, return "unknown".
 */
/* ARGSUSED */
int
vcpu_on_pcpu(processorid_t cpu)
{
	return (VCPU_STATE_UNKNOWN);
}