/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available. Code in this module should be the
 * absolute bare-bones support and must be cognizant of Intel, AMD, and other
 * x86 vendors.
 */
#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>		/* kmem_zalloc(), kmem_free() */
#include <sys/modctl.h>
#include <sys/x86_archext.h>

#include "gcpu.h"		/* gcpu_data_t, struct gcpu_chipshared */
/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32

static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];

int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
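
/*
 * gcpu_id_disable and gcpu_id_override[] are developer tunables consulted by
 * gcpu_init_ident() below. As a hypothetical example, a developer testing CPU
 * autoreplace could point gcpu_id_override[<chipid>] at a replacement string
 * (for instance via kmdb on a debug kernel), or set gcpu_id_disable to 1 to
 * skip identity generation entirely.
 */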
/*
 * This should probably be delegated to a CPU-specific module. However, as
 * those haven't been developed as actively for recent CPUs, we should revisit
 * this once we have one and move this out of gcpu.
 *
 * This method is only supported on Intel Xeon platforms. It relies on a
 * combination of the PPIN and the cpuid signature; both are required to form
 * the synthetic ID. The ID is prefixed with iv0-INTC to mark it as an Intel
 * synthetic ID, where iv0 is the illumos version zero of the Intel ID scheme.
 * If we adopt a new scheme for a new generation of processors, that should
 * rev the version field; otherwise, for a given processor, this synthetic ID
 * should not change. For more information on the PPIN and these MSRs, see the
 * relevant processor external design specification.
 */
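/*
 * For illustration only (these values are made up): a Skylake Xeon whose
 * cpuid signature is 0x50654 and whose PPIN reads as 0x1234567890abcdef would
 * yield the synthetic ID string "iv0-INTC-50654-1234567890abcdef", per the
 * kmem_asprintf() format used at the end of gcpu_init_ident_intc() below.
 */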
static char *
gcpu_init_ident_intc(cmi_hdl_t hdl)
{
	uint64_t msr;

	/*
	 * This list should be extended as new Intel Xeon family processors
	 * come to market.
	 */
	switch (cmi_hdl_model(hdl)) {
	case INTC_MODEL_IVYBRIDGE_XEON:
	case INTC_MODEL_HASWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON_D:
	case INTC_MODEL_SKYLAKE_XEON:
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN. Don't worry about
	 * success or failure of this part, as we will have gotten everything
	 * that we need. It is possible that it locked open, for example.
	 */
	(void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
}
static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}

	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		sp->gcpus_ident = gcpu_init_ident_intc(hdl);
		break;
	default:
		break;
	}
}
/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu. We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			/* A sibling beat us to it; discard ours, use theirs */
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}
/*
 * Undo the configuration performed in gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	/*
	 * Keep shared data in cache for reuse.
	 */
	sp = gcpu_shared[chipid];
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}
void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable || gcpu == NULL)
		return;

	cms_post_startup(hdl);
}
void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

	/*
	 * All cpu handles are initialized only once all cpus are started, so
	 * we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
}
const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[cmi_hdl_chipid(hdl)];
	return (sp->gcpus_ident);
}
#ifdef	__xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}