 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * CPU Module Interface - hardware abstraction.
 */

#ifdef __xpv
#include <sys/xpv_user.h>
#endif

#include <sys/types.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
#include <sys/ksynch.h>
#include <sys/x_call.h>
#include <sys/pghw.h>
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgspace.h>
#include <sys/archsystm.h>
#include <sys/ontrap.h>
#include <sys/controlregs.h>
#include <sys/sunddi.h>
#include <sys/trap.h>
#include <sys/mca_x86.h>
#include <sys/processor.h>
#include <sys/cmn_err.h>
#include <sys/nvpair.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/smb/fmsmb.h>
#include <sys/cpu_module_impl.h>
/*
 * Variable which determines if the SMBIOS supports x86 generic topology; or
 * if legacy topology enumeration will occur.
 */
extern int x86gentopo_legacy;
/*
 * Outside of this file consumers use the opaque cmi_hdl_t.  This
 * definition is duplicated in the generic_cpu mdb module, so keep
 * them in-sync when making changes.
 */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;

#define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
#define	HDLOPS(hdl)	((hdl)->cmih_ops)

#define	CMIH_F_INJACTV		0x1ULL	/* error injection in progress */
#define	CMIH_F_DEAD		0x2ULL	/* handle is being destroyed */

/*
 * Ops structure for handle operations.
 */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid)(cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};

static const struct cmi_hdl_ops cmi_hdl_ops;
/*
 * Handles are looked up from contexts such as polling, injection etc
 * where the context is reasonably well defined (although a poller could
 * interrupt any old thread holding any old lock).  They are also looked
 * up by machine check handlers, which may strike at inconvenient times
 * such as during handle initialization or destruction or during handle
 * lookup (which the #MC handler itself will also have to perform).
 *
 * So keeping handles in a linked list makes locking difficult when we
 * consider #MC handlers.  Our solution is to have a lookup table indexed
 * by that which uniquely identifies a handle - chip/core/strand id -
 * with each entry a structure including a pointer to a handle
 * structure for the resource, and a reference count for the handle.
 * Reference counts are modified atomically.  The public cmi_hdl_hold
 * always succeeds because this can only be used after handle creation
 * and before the call to destruct, so the hold count is already at least one.
 * In other functions that lookup a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already decremented to zero
 * before applying our hold.
 *
 * The table is an array of maximum number of chips defined in
 * CMI_CHIPID_ARR_SZ indexed by the chip id.  If the chip is not present, the
 * entry is NULL.  Each entry is a pointer to another array which contains a
 * list of all strands of the chip.  This first level table is allocated when
 * we first want to populate an entry.  The size of the latter (per chip)
 * table is CMI_MAX_STRANDS_PER_CHIP and it is populated when one of its
 * cpus starts.
 *
 * Ideally we should only allocate to the actual number of chips, cores per
 * chip and strands per core.  The number of chips is not available until all
 * of them are passed.  The number of cores and strands are partially
 * available.  For now we stick with the above approach.
 */
#define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */

#define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
#define	CMI_MAX_CORES_PER_CHIP(cbits)	(1 << (cbits))
#define	CMI_MAX_COREID(cbits)		((1 << (cbits)) - 1)
#define	CMI_MAX_STRANDS_PER_CORE(sbits)	(1 << (sbits))
#define	CMI_MAX_STRANDID(sbits)		((1 << (sbits)) - 1)
#define	CMI_MAX_STRANDS_PER_CHIP(cbits, sbits)	\
	(CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))

#define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)
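/*
 * Worked example (illustrative, not part of the original comment): with
 * the default cmi_core_nbits = 4 and cmi_strand_nbits = 3, each per-chip
 * table holds 16 * 8 = 128 cmi_hdl_ent_t slots, and the tuple
 * <chipid = 2, coreid = 5, strandid = 1> selects
 * cmi_chip_tab[2] + (((5 & 0xf) << 3) | (1 & 0x7)), i.e. slot 41 of the
 * chip 2 table.
 */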
typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
} cmi_hdl_ent_t;

static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];

/*
 * Default values for the number of core and strand bits.
 */
uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
static int cmi_ext_topo_check = 0;
/*
 * Controls where we will source PCI config space data.
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;

/*
 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc.
 */
#define	CMI_MSR_FLAG_RD_HWOK		0x0001
#define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_MSR_FLAG_WR_HWOK		0x0004
#define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008
int cmi_call_func_ntv_tries = 3;

static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_priority - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
static uint64_t injcnt;

void
cmi_hdl_inj_begin(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	if (hdl != NULL)
		hdl->cmih_flags |= CMIH_F_INJACTV;
	if (injcnt++ == 0) {
		cmn_err(CE_NOTE, "Hardware error injection/simulation "
		    "activity noted");
	}
}

void
cmi_hdl_inj_end(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
	if (hdl != NULL)
		hdl->cmih_flags &= ~CMIH_F_INJACTV;
}

boolean_t
cmi_inj_tainted(void)
{
	return (injcnt != 0 ? B_TRUE : B_FALSE);
}
/*
 *	 =======================================================
 *	|	MSR Interposition				|
 *	|	-----------------				|
 *	|							|
 *	 -------------------------------------------------------
 */

#define	CMI_MSRI_HASHSZ		16
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	(((uintptr_t)(hdl) >> 3 + (msr)) % (CMI_MSRI_HASHSZ - 1))

struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
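/*
 * Illustrative sketch of how the routines below compose (hypothetical
 * injector fragment, not code from this file): an injector stashes a
 * fake MSR value, and a later read through the handle observes that
 * value instead of the hardware register:
 *
 *	msri_addent(hdl, msr, fakeval);
 *	(void) cmi_hdl_rdmsr((cmi_hdl_t)hdl, msr, &val);
 *	ASSERT(val == fakeval);
 */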
static void
msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	mutex_enter(&hbp->msrib_lock);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr))
			break;
	}

	if (hep != NULL) {
		hep->msrie_msrval = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->msrie_hdl = hdl;
		hep->msrie_msrnum = msr;
		hep->msrie_msrval = val;

		if (hbp->msrib_head != NULL)
			hbp->msrib_head->msrie_prev = hep;
		hep->msrie_next = hbp->msrib_head;
		hep->msrie_prev = NULL;
		hbp->msrib_head = hep;
	}

	mutex_exit(&hbp->msrib_lock);
}

/*
 * Look for a match for the given handle and msr.  Return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	/*
	 * This function is called during #MC trap handling, so we should
	 * consider the possibility that the hash mutex is held by the
	 * interrupted thread.  This should not happen because interposition
	 * is an artificial injection mechanism and the #MC is requested
	 * after adding entries, but just in case of a real #MC at an
	 * unlucky moment we'll use mutex_tryenter here.
	 */
	if (!mutex_tryenter(&hbp->msrib_lock))
		return (0);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			*valp = hep->msrie_msrval;
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);

	return (hep != NULL);
}

/*
 * Remove any interposed value that matches.
 */
static void
msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	if (!mutex_tryenter(&hbp->msrib_lock))
		return;

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			if (hep->msrie_prev != NULL)
				hep->msrie_prev->msrie_next = hep->msrie_next;

			if (hep->msrie_next != NULL)
				hep->msrie_next->msrie_prev = hep->msrie_prev;

			if (hbp->msrib_head == hep)
				hbp->msrib_head = hep->msrie_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);
}
/*
 *	 =======================================================
 *	|	PCI Config Space Interposition			|
 *	|	------------------------------			|
 *	|							|
 *	 -------------------------------------------------------
 */

/*
 * Hash for interposed PCI config space values.  We lookup on bus/dev/fun/offset
 * and then record whether the value stashed was made with a byte, word or
 * doubleword access;  we will only return a hit for an access of the
 * same size.  If you access say a 32-bit register using byte accesses
 * and then attempt to read the full 32-bit value back you will not obtain
 * any sort of merged result - you get a lookup miss.
 */

#define	CMI_PCII_HASHSZ		16
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
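/*
 * Illustrative sketch (hypothetical fragment, not code from this file):
 * because the access size is part of the match, a value interposed with
 * a 32-bit access is invisible to a byte-sized lookup of the same
 * bus/dev/func/reg:
 *
 *	pcii_addent(bus, dev, func, reg, 0xdeadbeef, 4);
 *	(void) pcii_lookup(bus, dev, func, reg, 4, &val);    hit, val set
 *	(void) pcii_lookup(bus, dev, func, reg, 1, &val);    miss
 */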
/*
 * Add a new entry to the PCI interpose hash, overwriting any existing
 * entry that is found.
 */
static void
pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	cmi_hdl_inj_begin(NULL);

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
			break;
	}

	if (hep != NULL) {
		hep->pcii_val = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->pcii_bus = bus;
		hep->pcii_dev = dev;
		hep->pcii_func = func;
		hep->pcii_reg = reg;
		hep->pcii_asize = asz;
		hep->pcii_val = val;

		if (hbp->pciib_head != NULL)
			hbp->pciib_head->pcii_prev = hep;
		hep->pcii_next = hbp->pciib_head;
		hep->pcii_prev = NULL;
		hbp->pciib_head = hep;
	}

	mutex_exit(&hbp->pciib_lock);

	cmi_hdl_inj_end(NULL);
}

/*
 * Look for a match for the given bus/dev/func/reg; return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	if (!mutex_tryenter(&hbp->pciib_lock))
		return (0);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			*valp = hep->pcii_val;
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);

	return (hep != NULL);
}

/*
 * Remove any interposed value that matches.
 */
static void
pcii_rment(int bus, int dev, int func, int reg, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			if (hep->pcii_prev != NULL)
				hep->pcii_prev->pcii_next = hep->pcii_next;

			if (hep->pcii_next != NULL)
				hep->pcii_next->pcii_prev = hep->pcii_prev;

			if (hbp->pciib_head == hep)
				hbp->pciib_head = hep->pcii_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);
}
#ifndef __xpv

/*
 *	 =======================================================
 *	|	Native methods					|
 *	|	--------------					|
 *	|							|
 *	| These are used when we are running native on bare-	|
 *	| metal, or simply don't know any better.		|
 *	 -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)

static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
ntv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
ntv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (cpuid_get_apicid(HDLPRIV(hdl)));
}

static uint16_t
ntv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
ntv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
ntv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}

static const char *
ntv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsocketstr(HDLPRIV(hdl)));
}

static id_t
ntv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (HDLPRIV(hdl)->cpu_id);
}
/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;

	return (0);
}

static ulong_t
ntv_getcr4(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);
	ulong_t val;

	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);

	return (val);
}

/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
}

volatile uint32_t cmi_trapped_rdmsr;

/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
		return (CMIERR_INTERPOSE);

	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)valp));
}

volatile uint32_t cmi_trapped_wrmsr;

/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
		return (CMI_SUCCESS);

	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)&val));
}

static cmi_errno_t
ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	msri_addent(hdl, msr, val);
	return (CMI_SUCCESS);
}

/*ARGSUSED*/
static int
ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	int int_no = (int)arg1;

	if (int_no == T_MCE)
		int18();
	else
		int_cmci();
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
}

static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	int rc;
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	while (mutex_tryenter(&cpu_lock) == 0) {
		if (hdl->cmih_flags & CMIH_F_DEAD)
			return (EBUSY);
		delay(1);
	}

	rc = p_online_internal_locked(cpuid, new_status, old_status);
	mutex_exit(&cpu_lock);

	return (rc);
}

#else	/* __xpv */
/*
 *	 =======================================================
 *	|	xVM dom0 methods				|
 *	|	----------------				|
 *	|							|
 *	| These are used when we are running as dom0 in	|
 *	| a Solaris xVM context.				|
 *	 -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)

extern uint_t _cpuid_vendorstr_to_vendorcode(char *);

static uint_t
xpv_vendor(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
	    HDLPRIV(hdl))));
}

static const char *
xpv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
}

static uint_t
xpv_family(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_family(HDLPRIV(hdl)));
}

static uint_t
xpv_model(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_model(HDLPRIV(hdl)));
}

static uint_t
xpv_stepping(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_stepping(HDLPRIV(hdl)));
}

static uint_t
xpv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
xpv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
xpv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
xpv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
xpv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
}

static uint16_t
xpv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
xpv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
xpv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

static id_t
xpv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
}
static cmi_errno_t
xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	switch (msr) {
	case IA32_MSR_MCG_CAP:
		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
		break;

	default:
		return (CMIERR_NOTSUP);
	}

	return (CMI_SUCCESS);
}

/*
 * Request the hypervisor to write an MSR for us.  The hypervisor
 * will only accept MCA-related MSRs, as this is for MCA error
 * simulation purposes alone.  We will pre-screen MSRs for injection
 * so we don't bother the HV with bogus requests.  We will permit
 * injection to any MCA bank register, and to MCG_STATUS.
 */

#define	IS_MCA_INJ_MSR(msr) \
	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
	(msr) == IA32_MSR_MCG_STATUS)
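/*
 * For illustration: this admits the architectural CTL/STATUS/ADDR/MISC
 * registers of banks 0 through 10 - IA32_MSR_MC(0, CTL) and
 * IA32_MSR_MC(10, MISC) both pass - plus IA32_MSR_MCG_STATUS, while an
 * unrelated MSR such as IA32_MSR_MCG_CAP is rejected below with
 * CMIERR_API.
 */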
static cmi_errno_t
xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
{
	xen_mc_t xmc;
	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return (CMIERR_NOTSUP);	/* for injection use only! */

	if (!IS_MCA_INJ_MSR(msr))
		return (CMIERR_API);

	if (panicstr)
		return (CMIERR_DEADLOCK);

	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
	mci->mcinj_count = 1;	/* learn to batch sometime */
	mci->mcinj_msr[0].reg = msr;
	mci->mcinj_msr[0].value = val;

	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
	    0 ? CMI_SUCCESS : CMIERR_NOTSUP);
}

static cmi_errno_t
xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
}

static cmi_errno_t
xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
}

static void
xpv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	xen_mc_t xmc;
	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return;

	if (int_no != T_MCE) {
		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
		    int_no);
		return;
	}

	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));

	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
}

static int
xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	xen_sysctl_t xs;
	int op, rc, status;

	new_status &= ~P_FORCED;

	switch (new_status) {
	case P_STATUS:
		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
		break;
	case P_FAULTED:
	case P_OFFLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
		break;
	case P_ONLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
		break;
	default:
		return (-1);
	}

	xs.cmd = XEN_SYSCTL_cpu_hotplug;
	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
	xs.u.cpu_hotplug.op = op;

	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
		status = rc;
		rc = 0;
		switch (status) {
		case XEN_CPU_HOTPLUG_STATUS_NEW:
			*old_status = P_OFFLINE;
			break;
		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
			*old_status = P_FAULTED;
			break;
		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
			*old_status = P_ONLINE;
			break;
		default:
			return (-1);
		}
	}

	return (-rc);
}

#endif	/* __xpv */
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
#ifdef __xpv
	xen_mc_lcpu_cookie_t cpi;

	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
	    cpi = xen_physcpu_next(cpi)) {
		if (xen_physcpu_chipid(cpi) == chipid &&
		    xen_physcpu_coreid(cpi) == coreid &&
		    xen_physcpu_strandid(cpi) == strandid)
			return ((void *)cpi);
	}

	return (NULL);
#else	/* __xpv */
	cpu_t *cp, *startcp;

	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);

	kpreempt_enable();

	return (NULL);
#endif	/* __xpv */
}

static boolean_t
cpu_is_cmt(void *priv)
{
#ifdef __xpv
	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
#else	/* __xpv */
	cpu_t *cp = (cpu_t *)priv;

	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (strands_per_core > 1);
#endif	/* __xpv */
}
/*
 * Find the handle entry of a given cpu identified by a <chip,core,strand>
 * tuple.
 */
static cmi_hdl_ent_t *
cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
{
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	/*
	 * Allocate a per-chip table which contains a list of handles
	 * for all strands of the chip.
	 */
	if (cmi_chip_tab[chipid] == NULL) {
		size_t sz;
		cmi_hdl_ent_t *pg;

		sz = max_strands * sizeof (cmi_hdl_ent_t);
		pg = kmem_zalloc(sz, KM_SLEEP);

		/* test and set the per-chip table if it is not allocated */
		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
			kmem_free(pg, sz); /* someone beat us to it */
	}

	return (cmi_chip_tab[chipid] +
	    ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
	    ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
}
extern void cpuid_get_ext_topo(uint_t, uint_t *, uint_t *);

cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;
	uint_t vendor;

#ifdef __xpv
	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
#else
	ASSERT(class == CMI_HDL_NATIVE);
#endif

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	/*
	 * Assume all chips in the system are the same type.
	 * For Intel, attempt to check if extended topology is available
	 * CPUID.EAX=0xB. If so, get the number of core and strand bits.
	 */
#ifdef __xpv
	vendor = _cpuid_vendorstr_to_vendorcode(
	    (char *)xen_physcpu_vendorstr((xen_mc_lcpu_cookie_t)priv));
#else
	vendor = cpuid_getvendor((cpu_t *)priv);
#endif

	if (vendor == X86_VENDOR_Intel && cmi_ext_topo_check == 0) {
		cpuid_get_ext_topo(vendor, &cmi_core_nbits, &cmi_strand_nbits);
		cmi_ext_topo_check = 1;
	}

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
#ifdef __xpv
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_INTERPOSEOK;

	/*
	 * XXX: need hypervisor support for procnodeid, for now assume
	 * single-node processors (procnodeid = chipid)
	 */
	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
	hdl->cmih_procnodes_per_pkg = 1;
#else	/* __xpv */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
#endif	/* __xpv */

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * caller's logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}
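/*
 * Illustrative lifecycle sketch (hypothetical caller, not code from
 * this file): cpu configuration creates the handle, consumers look it
 * up and drop their hold when done, and cpu deconfigure destroys it:
 *
 *	hdl = cmi_hdl_create(CMI_HDL_NATIVE, chipid, coreid, strandid);
 *	...
 *	if ((h = cmi_hdl_lookup(CMI_HDL_NEUTRAL, chipid, coreid,
 *	    strandid)) != NULL) {
 *		(use the handle)
 *		cmi_hdl_rele(h);
 *	}
 *	...
 *	cmi_hdl_destroy(hdl);	(drops the initial hold taken above)
 */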
void
cmi_read_smbios(cmi_hdl_t ophdl)
{
	uint_t strand_apicid = UINT_MAX;
	uint_t chip_inst = UINT_MAX;
	uint16_t smb_id = USHRT_MAX;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

#ifndef __xpv
	strand_apicid = ntv_strand_apicid(hdl);
#else
	strand_apicid = xpv_strand_apicid(hdl);
#endif

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
			cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
			return;
		}

		hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "!cmi reads smbios base boards info failed");
	}
}
void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}

static int
cmi_hdl_canref(cmi_hdl_ent_t *ent)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	refcntp = &ent->cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}

void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
}

void
cmi_hdl_destroy(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	/* Release the reference count held by cmi_hdl_create(). */
	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
	hdl->cmih_flags |= CMIH_F_DEAD;

	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	/*
	 * Use busy polling instead of a condition variable here because
	 * cmi_hdl_rele() may be called from #MC handler.
	 */
	while (cmi_hdl_canref(ent)) {
		cmi_hdl_rele(ophdl);
		delay(1);
	}
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
void
cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
{
	IMPLHDL(ophdl)->cmih_spec = arg;
}

void *
cmi_hdl_getspecific(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_spec);
}

void
cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
	hdl->cmih_mcops = mcops;
	hdl->cmih_mcdata = mcdata;
}

const struct cmi_mc_ops *
cmi_hdl_getmcops(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcops);
}

void *
cmi_hdl_getmcdata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcdata);
}

cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	if (class == CMI_HDL_NEUTRAL)
#ifdef __xpv
		class = CMI_HDL_SOLARIS_xVM_MCA;
#else
		class = CMI_HDL_NATIVE;
#endif

	if (!cmi_hdl_canref(ent))
		return (NULL);

	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}
cmi_hdl_t
cmi_hdl_any(void)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			if (cmi_hdl_canref(ent))
				return ((cmi_hdl_t)ent->cmae_hdlp);
		}
	}

	return (NULL);
}
void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			if (cmi_hdl_canref(ent)) {
				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
				    == CMI_HDL_WALK_DONE) {
					cmi_hdl_rele((cmi_hdl_t)hdl);
					return;
				}
				cmi_hdl_rele((cmi_hdl_t)hdl);
			}
		}
	}
}

void
cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
{
	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
	IMPLHDL(ophdl)->cmih_cmi = cmi;
}

void *
cmi_hdl_getcmi(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmi);
}

void *
cmi_hdl_getcmidata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmidata);
}

enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}
#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}
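/*
 * For illustration, CMI_HDL_OPFUNC(vendor, uint_t) below expands to:
 *
 *	uint_t
 *	cmi_hdl_vendor(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->
 *		    cmio_vendor(IMPLHDL(ophdl)));
 *	}
 */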
CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
boolean_t
cmi_hdl_is_cmt(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mstrand);
}

void
cmi_hdl_int(cmi_hdl_t ophdl, int num)
{
	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);
	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
	cmi_hdl_inj_end(NULL);
}

int
cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
{
	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
	    new_status, old_status));
}
#ifndef __xpv
/*
 * Return hardware chip instance; cpuid_get_chipid provides this directly.
 */
uint_t
cmi_ntv_hwchipid(cpu_t *cp)
{
	return (cpuid_get_chipid(cp));
}

/*
 * Return hardware node instance; cpuid_get_procnodeid provides this directly.
 */
uint_t
cmi_ntv_hwprocnodeid(cpu_t *cp)
{
	return (cpuid_get_procnodeid(cp));
}

/*
 * Return core instance within a single chip.
 */
uint_t
cmi_ntv_hwcoreid(cpu_t *cp)
{
	return (cpuid_get_pkgcoreid(cp));
}

/*
 * Return strand number within a single core.  cpuid_get_clogid numbers
 * all execution units (strands, or cores in unstranded models) sequentially
 * within a single chip.
 */
uint_t
cmi_ntv_hwstrandid(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (cpuid_get_clogid(cp) % strands_per_core);
}
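/*
 * Worked example: on a 4-core chip with 8 execution units,
 * cpuid_get_ncpu_per_chip() returns 8 and cpuid_get_ncore_per_chip()
 * returns 4, so strands_per_core is 2 and the cpu with clogid 5 is
 * strand 5 % 2 = 1 of its core.
 */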
static void
cmi_ntv_hwdisable_mce_xc(void)
{
	ulong_t cr4;

	cr4 = getcr4();
	cr4 = cr4 & (~CR4_MCE);
	setcr4(cr4);
}

void
cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
{
	cpuset_t set;
	cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
	cpu_t *cp = HDLPRIV(thdl);

	if (CPU->cpu_id == cp->cpu_id) {
		cmi_ntv_hwdisable_mce_xc();
	} else {
		CPUSET_ONLY(set, cp->cpu_id);
		xc_call(NULL, NULL, NULL, CPUSET2BV(set),
		    (xc_func_t)cmi_ntv_hwdisable_mce_xc);
	}
}

#endif	/* __xpv */

void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}

void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}
cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}
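/*
 * Illustrative sketch (hypothetical fragment, not code from this file):
 * an interposed register is returned ahead of any hardware access,
 * while cmi_hdl_wrmsr() below first invalidates any interposed value:
 *
 *	regs->cmr_msrnum = msr;
 *	regs->cmr_msrval = fakeval;
 *	cmi_hdl_msrinterpose(ophdl, regs, 1);
 *	(void) cmi_hdl_rdmsr(ophdl, msr, &val);	now val == fakeval
 */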
cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
}

void
cmi_hdl_enable_mce(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	ulong_t cr4;

	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
	    HDLOPS(hdl)->cmio_setcr4 == NULL)
		return;

	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);

	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
}
void
cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	uint_t i;

	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);

	for (i = 0; i < nregs; i++, regs++)
		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
		    regs->cmr_msrval);

	cmi_hdl_inj_end(ophdl);
}

void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	uint_t i;

	for (i = 0; i < nregs; i++, regs++)
		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
}

void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}

void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	default:
		val = 0;
	}

	return (val);
}
uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}

uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}

uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}

void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}

void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}

void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}
static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
			    val);
		break;

	default:
		break;
	}
}

void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}

void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}

void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}
static const struct cmi_hdl_ops cmi_hdl_ops = {
#ifdef __xpv
	/*
	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
	 */
	xpv_vendor,		/* cmio_vendor */
	xpv_vendorstr,		/* cmio_vendorstr */
	xpv_family,		/* cmio_family */
	xpv_model,		/* cmio_model */
	xpv_stepping,		/* cmio_stepping */
	xpv_chipid,		/* cmio_chipid */
	xpv_procnodeid,		/* cmio_procnodeid */
	xpv_coreid,		/* cmio_coreid */
	xpv_strandid,		/* cmio_strandid */
	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	xpv_strand_apicid,	/* cmio_strand_apicid */
	xpv_chiprev,		/* cmio_chiprev */
	xpv_chiprevstr,		/* cmio_chiprevstr */
	xpv_getsockettype,	/* cmio_getsockettype */
	xpv_getsocketstr,	/* cmio_getsocketstr */
	xpv_logical_id,		/* cmio_logical_id */
	NULL,			/* cmio_getcr4 */
	NULL,			/* cmio_setcr4 */
	xpv_rdmsr,		/* cmio_rdmsr */
	xpv_wrmsr,		/* cmio_wrmsr */
	xpv_msrinterpose,	/* cmio_msrinterpose */
	xpv_int,		/* cmio_int */
	xpv_online,		/* cmio_online */
	xpv_smbiosid,		/* cmio_smbiosid */
	xpv_smb_chipid,		/* cmio_smb_chipid */
	xpv_smb_bboard		/* cmio_smb_bboard */
#else	/* __xpv */
	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	ntv_vendor,		/* cmio_vendor */
	ntv_vendorstr,		/* cmio_vendorstr */
	ntv_family,		/* cmio_family */
	ntv_model,		/* cmio_model */
	ntv_stepping,		/* cmio_stepping */
	ntv_chipid,		/* cmio_chipid */
	ntv_procnodeid,		/* cmio_procnodeid */
	ntv_coreid,		/* cmio_coreid */
	ntv_strandid,		/* cmio_strandid */
	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	ntv_strand_apicid,	/* cmio_strand_apicid */
	ntv_chiprev,		/* cmio_chiprev */
	ntv_chiprevstr,		/* cmio_chiprevstr */
	ntv_getsockettype,	/* cmio_getsockettype */
	ntv_getsocketstr,	/* cmio_getsocketstr */
	ntv_logical_id,		/* cmio_logical_id */
	ntv_getcr4,		/* cmio_getcr4 */
	ntv_setcr4,		/* cmio_setcr4 */
	ntv_rdmsr,		/* cmio_rdmsr */
	ntv_wrmsr,		/* cmio_wrmsr */
	ntv_msrinterpose,	/* cmio_msrinterpose */
	ntv_int,		/* cmio_int */
	ntv_online,		/* cmio_online */
	ntv_smbiosid,		/* cmio_smbiosid */
	ntv_smb_chipid,		/* cmio_smb_chipid */
	ntv_smb_bboard		/* cmio_smb_bboard */
#endif	/* __xpv */
};