/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/dtrace.h>
/*
 * List of the processor ACPI object types that are being used.
 */
typedef enum cpu_acpi_obj {
	PDC_OBJ = 0,
	PCT_OBJ,
	PSS_OBJ,
	PSD_OBJ,
	PPC_OBJ,
	PTC_OBJ,
	TSS_OBJ,
	TSD_OBJ,
	TPC_OBJ,
	CSD_OBJ,
	CST_OBJ
} cpu_acpi_obj_t;

/*
 * Container to store object name.
 * Other attributes can be added in the future as necessary.
 */
typedef struct cpu_acpi_obj_attr {
	char *name;
} cpu_acpi_obj_attr_t;

/*
 * List of object attributes.
 * NOTE: Please keep the ordering of the list the same as cpu_acpi_obj_t.
 */
static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
	{"_PDC"},
	{"_PCT"},
	{"_PSS"},
	{"_PSD"},
	{"_PPC"},
	{"_PTC"},
	{"_TSS"},
	{"_TSD"},
	{"_TPC"},
	{"_CSD"},
	{"_CST"}
};
/*
 * Cache the ACPI CPU control data objects.
 */
static int
cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
    cpu_acpi_ctrl_regs_t *regs)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	AML_RESOURCE_GENERIC_REGISTER *greg;
	int ret = -1;
	int i;

	/*
	 * Fetch the control registers (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (we just consider it a fixed hardware case).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	obj = abuf.Pointer;
	if (obj->Package.Count != 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in "
			    "%s package for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
			goto out;
		}

		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		if (greg->DescriptorType !=
		    ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package has format "
			    "error for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
			goto out;
		}
		if (greg->ResourceLength !=
		    ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package not right "
			    "size for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
			goto out;
		}
		if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
		    greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s contains unsupported "
			    "address space type %x for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    greg->AddressSpaceId, handle->cs_id);
			goto out;
		}
	}

	/*
	 * Looks good, cache the control registers.
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		regs[i].cr_addrspace_id = greg->AddressSpaceId;
		regs[i].cr_width = greg->BitWidth;
		regs[i].cr_offset = greg->BitOffset;
		regs[i].cr_asize = greg->AccessSize;
		regs[i].cr_address = greg->Address;
	}
	ret = 0;

out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
/*
 * Cache the ACPI _PCT data. The _PCT data defines the interface to use
 * when making power level transitions (i.e., system IO ports, fixed
 * hardware port, etc.).
 */
static int
cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
	pct = &CPU_ACPI_PCT(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _PTC data. The _PTC data defines the interface to use
 * when making T-state transitions (i.e., system IO ports, fixed
 * hardware port, etc.).
 */
static int
cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ptc_t *ptc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
	ptc = &CPU_ACPI_PTC(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
	return (ret);
}
/*
 * Cache the ACPI CPU state dependency data objects.
 */
static int
cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *pkg, *elements;
	int number;
	int ret = -1;

	if (objtype == CSD_OBJ) {
		number = 6;
	} else {
		number = 5;
	}

	/*
	 * Fetch the dependencies (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (it's up to the caller to determine how to handle non-existence).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	pkg = abuf.Pointer;
	if (((objtype != CSD_OBJ) && (pkg->Package.Count != 1)) ||
	    ((objtype == CSD_OBJ) && (pkg->Package.Count != 1) &&
	    (pkg->Package.Count != 2))) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s unsupported package count %d "
		    "for CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    pkg->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * For the C-state domain, we assume C2 and C3 have the same
	 * domain data.
	 */
	if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
	    pkg->Package.Elements[0].Package.Count != number) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s package "
		    "for CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	elements = pkg->Package.Elements[0].Package.Elements;
	if (elements[0].Integer.Value != number ||
	    elements[1].Integer.Value != 0) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected %s revision for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	sd->sd_entries = elements[0].Integer.Value;
	sd->sd_revision = elements[1].Integer.Value;
	sd->sd_domain = elements[2].Integer.Value;
	sd->sd_type = elements[3].Integer.Value;
	sd->sd_num = elements[4].Integer.Value;
	if (objtype == CSD_OBJ) {
		sd->sd_index = elements[5].Integer.Value;
	}

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
/*
 * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
{
	cpu_acpi_psd_t *psd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
	psd = &CPU_ACPI_PSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
{
	cpu_acpi_tsd_t *tsd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
	tsd = &CPU_ACPI_TSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _CSD data. The _CSD data defines C-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_csd(cpu_acpi_handle_t handle)
{
	cpu_acpi_csd_t *csd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CSD_CACHED);
	csd = &CPU_ACPI_CSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, CSD_OBJ, csd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CSD_CACHED);
	return (ret);
}
/*
 * Copy the validated _PSS package entries into the handle's cached
 * P-state array, skipping duplicates.
 */
static void
cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_pstate_t *pstate;
	ACPI_OBJECT *q, *l;
	uint32_t *up;
	int i, j;

	CPU_ACPI_PSTATES_COUNT(handle) = cnt;
	CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
	    KM_SLEEP);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)pstate;
		for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		pstate++;
		cnt--;
	}
}

/*
 * Copy the validated _TSS package entries into the handle's cached
 * T-state array, skipping duplicates.
 */
static void
cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_tstate_t *tstate;
	ACPI_OBJECT *q, *l;
	uint32_t *up;
	int i, j;

	CPU_ACPI_TSTATES_COUNT(handle) = cnt;
	CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
	    KM_SLEEP);
	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)tstate;
		for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		tstate++;
		cnt--;
	}
}
/*
 * Cache the _PSS or _TSS data.
 */
static int
cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, int fcnt)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj, *q, *l;
	boolean_t eot = B_FALSE;
	int ret = -1;
	int cnt;
	int i, j;

	/*
	 * Fetch the state data (if present) for the CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	obj = abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = 0;
	for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
		    obj->Package.Elements[i].Package.Count != fcnt) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in "
			    "%s package for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
			goto out;
		}

		q = obj->Package.Elements[i].Package.Elements;
		for (j = 0; j < fcnt; j++) {
			if (q[j].Type != ACPI_TYPE_INTEGER) {
				cmn_err(CE_NOTE, "!cpu_acpi: %s element "
				    "invalid (type) for CPU %d.",
				    cpu_acpi_obj_attrs[objtype].name,
				    handle->cs_id);
				goto out;
			}
		}

		/*
		 * Ignore duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		/*
		 * Some supported state tables are larger than required
		 * and unused elements are filled with patterns
		 * of 0xff. Simply check here for frequency = 0xffff
		 * and stop counting if found.
		 */
		if (q[0].Integer.Value == 0xffff) {
			eot = B_TRUE;
			continue;
		}

		/*
		 * We should never find a valid entry after we've hit
		 * the end-of-table entry.
		 */
		if (eot) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s "
			    "package after eot for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
			goto out;
		}

		/*
		 * States must be defined in order from highest to lowest.
		 */
		if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package state "
			    "definitions out of order for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
			goto out;
		}
		cnt++;
	}

	/*
	 * Yes, fill in the structure.
	 */
	ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
	(objtype == PSS_OBJ) ? cpu_acpi_cache_pstate(handle, obj, cnt) :
	    cpu_acpi_cache_tstate(handle, obj, cnt);

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
/*
 * Cache the _PSS data. The _PSS data defines the different power levels
 * supported by the CPU and the attributes associated with each power level
 * (i.e., frequency, voltage, etc.). The power levels are numbered from
 * highest to lowest. That is, the highest power level is _PSS entry 0
 * and the lowest power level is the last _PSS entry.
 */
static int
cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
	    CPU_ACPI_PSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
	return (ret);
}
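
/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * once cpu_acpi_cache_pstates() has run, a consumer can walk the cached
 * entries through the CPU_ACPI_PSTATES()/CPU_ACPI_PSTATES_COUNT() accessors
 * used above. CPU_ACPI_FREQ() is the same accessor used later by
 * cpu_acpi_get_speeds(); the logging below only demonstrates the access
 * pattern.
 */
static void
cpu_acpi_dump_pstates_sketch(cpu_acpi_handle_t handle)
{
	cpu_acpi_pstate_t *pstate;
	int i, nstates;

	nstates = (int)CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0; i < nstates; i++, pstate++) {
		/* Entry 0 is the highest power level per the _PSS ordering. */
		cmn_err(CE_NOTE, "!cpu_acpi: CPU %d P%d frequency %d",
		    handle->cs_id, i, (int)CPU_ACPI_FREQ(pstate));
	}
}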
/*
 * Cache the _TSS data. The _TSS data defines the different freq throttle
 * levels supported by the CPU and the attributes associated with each
 * throttle level (i.e., frequency throttle percentage, voltage, etc.).
 * The throttle levels are numbered from highest to lowest.
 */
static int
cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
	    CPU_ACPI_TSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
	return (ret);
}
/*
 * Cache the ACPI CPU present capabilities data objects.
 */
static int
cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	int ret = -1;

	/*
	 * Fetch the present capabilities object (if present) for the
	 * CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObject(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf);
	if (ACPI_FAILURE(astatus) && astatus != AE_NOT_FOUND) {
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s "
		    "package for CPU %d.", astatus,
		    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
		goto out;
	}

	if (astatus == AE_NOT_FOUND || abuf.Length == 0) {
		ret = 1;
		goto out;
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	*pc = obj->Integer.Value;
	ret = 0;

out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
/*
 * Cache the _PPC data. The _PPC simply contains an integer value which
 * represents the highest power level that a CPU should transition to.
 * That is, it's an index into the array of _PSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ppc_t *ppc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
	ppc = &CPU_ACPI_PPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
}
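
/*
 * Illustrative sketch (hypothetical helper): the comment above describes
 * _PPC as an index into the cached _PSS table, so a P-state driver would
 * typically treat entry _PPC as the highest power level it is currently
 * allowed to use. This assumes the CPU_ACPI_PPC() accessor from
 * sys/cpu_acpi.h yields that index once cpu_acpi_cache_ppc() has run.
 */
static cpu_acpi_pstate_t *
cpu_acpi_top_pstate_sketch(cpu_acpi_handle_t handle)
{
	uint_t top = (uint_t)CPU_ACPI_PPC(handle);

	if (top >= CPU_ACPI_PSTATES_COUNT(handle))
		return (NULL);	/* firmware handed us a bogus index */
	return ((cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle) + top);
}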
/*
 * Cache the _TPC data. The _TPC simply contains an integer value which
 * represents the throttle level that a CPU should transition to.
 * That is, it's an index into the array of _TSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
{
	cpu_acpi_tpc_t *tpc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
	tpc = &CPU_ACPI_TPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
}
static int
cpu_acpi_verify_cstate(cpu_acpi_cstate_t *cstate)
{
	uint32_t addrspaceid = cstate->cs_addrspace_id;

	if ((addrspaceid != ACPI_ADR_SPACE_FIXED_HARDWARE) &&
	    (addrspaceid != ACPI_ADR_SPACE_SYSTEM_IO)) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST unsupported address space "
		    "id: C%d, type: %d\n", cstate->cs_type, addrspaceid);
		return (1);
	}

	return (0);
}
static int
cpu_acpi_cache_cst(cpu_acpi_handle_t handle)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	ACPI_INTEGER cnt, old_cnt;
	cpu_acpi_cstate_t *cstate, *p;
	size_t alloc_size;
	int i, count;
	int ret = -1;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CST_CACHED);

	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;

	/*
	 * Fetch the C-state data (if present) for the CPU node.
	 */
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle, "_CST",
	    NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, CST_OBJ, int, astatus);
			goto out;
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating _CST package "
		    "for CPU %d.", astatus, handle->cs_id);
		goto out;
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST unsupported package "
		    "count %d for CPU %d.", obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = obj->Package.Elements[0].Integer.Value;
	if (cnt < 1 || cnt != obj->Package.Count - 1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid element "
		    "count %d != Package count %d for CPU %d",
		    (int)cnt, (int)obj->Package.Count - 1, handle->cs_id);
		goto out;
	}

	/*
	 * Reuse the old buffer if the number of C states is the same.
	 */
	if (CPU_ACPI_CSTATES(handle) &&
	    (old_cnt = CPU_ACPI_CSTATES_COUNT(handle)) != cnt) {
		kmem_free(CPU_ACPI_CSTATES(handle),
		    CPU_ACPI_CSTATES_SIZE(old_cnt));
		CPU_ACPI_CSTATES(handle) = NULL;
	}

	CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)cnt;
	alloc_size = CPU_ACPI_CSTATES_SIZE(cnt);
	if (CPU_ACPI_CSTATES(handle) == NULL)
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(alloc_size, KM_SLEEP);
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	p = cstate;

	for (i = 1, count = 1; i <= cnt; i++) {
		ACPI_OBJECT *pkg;
		AML_RESOURCE_GENERIC_REGISTER *reg;
		ACPI_OBJECT *element;

		pkg = &(obj->Package.Elements[i]);
		reg = (AML_RESOURCE_GENERIC_REGISTER *)
		    pkg->Package.Elements[0].Buffer.Pointer;
		cstate->cs_addrspace_id = reg->AddressSpaceId;
		cstate->cs_address = reg->Address;
		element = &(pkg->Package.Elements[1]);
		cstate->cs_type = element->Integer.Value;
		element = &(pkg->Package.Elements[2]);
		cstate->cs_latency = element->Integer.Value;
		element = &(pkg->Package.Elements[3]);
		cstate->cs_power = element->Integer.Value;

		if (cpu_acpi_verify_cstate(cstate)) {
			/*
			 * ignore this entry if it's not valid
			 */
			continue;
		} else if (cstate == p) {
			cstate++;
		} else if (p->cs_type == cstate->cs_type) {
			/*
			 * if there are duplicate entries, we keep the
			 * last one. This fixes:
			 * 1) some buggy BIOSes have total duplicate entries.
			 * 2) the ACPI Spec allows the same cstate entry with
			 *    different power and latency; we use the one
			 *    with more power saving.
			 */
			(void) memcpy(p, cstate, sizeof (cpu_acpi_cstate_t));
		} else {
			/*
			 * we got a valid entry, cache it to the
			 * cstate structure
			 */
			p = cstate++;
			count++;
		}
	}

	if (count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid count %d < 2 for "
		    "CPU %d", count, handle->cs_id);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		goto out;
	}

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate[0].cs_type != CPU_ACPI_C1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST first element type not "
		    "C1: %d for CPU %d", (int)cstate->cs_type, handle->cs_id);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		goto out;
	}

	if (count != cnt) {
		void *orig = CPU_ACPI_CSTATES(handle);

		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)count;
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(
		    CPU_ACPI_CSTATES_SIZE(count), KM_SLEEP);
		(void) memcpy(CPU_ACPI_CSTATES(handle), orig,
		    CPU_ACPI_CSTATES_SIZE(count));
		kmem_free(orig, alloc_size);
	}

	CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CST_CACHED);
	ret = 0;

out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
/*
 * Cache the _PCT, _PSS, _PSD and _PPC data.
 */
int
cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_pct(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PCT_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_pstates(handle) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PSS_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_psd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_ppc(handle);

	return (0);
}

void
cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_PSTATES(handle)) {
			kmem_free(CPU_ACPI_PSTATES(handle),
			    CPU_ACPI_PSTATES_SIZE(
			    CPU_ACPI_PSTATES_COUNT(handle)));
			CPU_ACPI_PSTATES(handle) = NULL;
		}
	}
}
/*
 * Cache the _PTC, _TSS, _TSD and _TPC data.
 */
int
cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
{
	int ret;

	if (cpu_acpi_cache_ptc(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PTC_OBJ);
		return (-1);
	}

	if ((ret = cpu_acpi_cache_tstates(handle)) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, TSS_OBJ);
		return (ret);
	}

	if (cpu_acpi_cache_tsd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, TSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_tpc(handle);

	return (0);
}

void
cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_TSTATES(handle)) {
			kmem_free(CPU_ACPI_TSTATES(handle),
			    CPU_ACPI_TSTATES_SIZE(
			    CPU_ACPI_TSTATES_COUNT(handle)));
			CPU_ACPI_TSTATES(handle) = NULL;
		}
	}
}
/*
 * Cache the _CST data.
 */
int
cpu_acpi_cache_cstate_data(cpu_acpi_handle_t handle)
{
	int ret;

	if ((ret = cpu_acpi_cache_cst(handle)) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, CST_OBJ);
		return (ret);
	}

	if (cpu_acpi_cache_csd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, CSD_OBJ);
		return (-1);
	}

	return (0);
}

void
cpu_acpi_free_cstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_CSTATES(handle)) {
			kmem_free(CPU_ACPI_CSTATES(handle),
			    CPU_ACPI_CSTATES_SIZE(
			    CPU_ACPI_CSTATES_COUNT(handle)));
			CPU_ACPI_CSTATES(handle) = NULL;
		}
	}
}
/*
 * Register a handler for processor change notifications.
 */
void
cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler, void *ctx)
{
	if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler, ctx)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
		    "notify handler for CPU %d.", handle->cs_id);
}

/*
 * Remove a handler for processor change notifications.
 */
void
cpu_acpi_remove_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler)
{
	if (ACPI_FAILURE(AcpiRemoveNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to remove "
		    "notify handler for CPU %d.", handle->cs_id);
}
/*
 * Write the _PDC (processor driver capabilities) object for the CPU node.
 */
int
cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
    uint32_t *capabilities)
{
	ACPI_STATUS astatus;
	ACPI_OBJECT obj;
	ACPI_OBJECT_LIST list = { 1, &obj };
	uint32_t *buffer;
	uint32_t *bufptr;
	uint32_t bufsize;
	int i;

	bufsize = (count + 2) * sizeof (uint32_t);
	buffer = kmem_zalloc(bufsize, KM_SLEEP);
	buffer[0] = revision;
	buffer[1] = count;
	bufptr = &buffer[2];
	for (i = 0; i < count; i++)
		*bufptr++ = *capabilities++;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = bufsize;
	obj.Buffer.Pointer = (void *)buffer;

	/*
	 * Evaluate _PDC (if present) for the CPU node.
	 */
	astatus = AcpiEvaluateObject(handle->cs_handle, "_PDC", &list, NULL);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, PDC_OBJ, int, astatus);
			kmem_free(buffer, bufsize);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating _PDC "
		    "package for CPU %d.", astatus, handle->cs_id);
		kmem_free(buffer, bufsize);
		return (-1);
	}

	kmem_free(buffer, bufsize);
	return (0);
}
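
/*
 * Illustrative sketch (hypothetical caller, hypothetical values): a driver
 * announces its capabilities to the firmware by packing them into a
 * uint32_t array and handing it to cpu_acpi_write_pdc(). The revision and
 * capability word below are placeholders, not values taken from this file.
 */
static void
cpu_acpi_write_pdc_sketch(cpu_acpi_handle_t handle)
{
	uint32_t caps[1];

	caps[0] = 0x1;	/* hypothetical capability bit */
	if (cpu_acpi_write_pdc(handle, 1, 1, caps) != 0)
		cmn_err(CE_NOTE, "!cpu_acpi: _PDC not written for CPU %d.",
		    handle->cs_id);
}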
/*
 * Write to a system IO port.
 */
int
cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
		cmn_err(CE_NOTE, "!cpu_acpi: error writing system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Read from a system IO port.
 */
int
cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
		cmn_err(CE_NOTE, "!cpu_acpi: error reading system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}
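
/*
 * Illustrative sketch (hypothetical helper): how a cached _PCT register
 * and cpu_acpi_write_port() fit together. For a SystemIO register the
 * cached cr_address/cr_width fields describe the port; the control value
 * ("ctl", a placeholder here) would come from the selected _PSS entry.
 * The first cached _PCT register is treated as the control register,
 * following the ACPI _PCT layout; fixed-hardware registers are typically
 * programmed via MSRs by the P-state driver and are not covered here.
 */
static int
cpu_acpi_write_pct_sketch(cpu_acpi_handle_t handle, uint32_t ctl)
{
	cpu_acpi_pct_t *pct = &CPU_ACPI_PCT(handle)[0];

	if (pct->cr_addrspace_id != ACPI_ADR_SPACE_SYSTEM_IO)
		return (-1);
	return (cpu_acpi_write_port(pct->cr_address, ctl, pct->cr_width));
}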
/*
 * Return supported frequencies.
 */
uint_t
cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
{
	cpu_acpi_pstate_t *pstate;
	int *hspeeds;
	uint_t nspeeds;
	int i;

	nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
	for (i = 0; i < nspeeds; i++) {
		hspeeds[i] = CPU_ACPI_FREQ(pstate);
		pstate++;
	}
	*speeds = hspeeds;
	return (nspeeds);
}

/*
 * Free resources allocated by cpu_acpi_get_speeds().
 */
void
cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
{
	kmem_free(speeds, nspeeds * sizeof (int));
}
uint_t
cpu_acpi_get_max_cstates(cpu_acpi_handle_t handle)
{
	if (CPU_ACPI_CSTATES(handle))
		return (CPU_ACPI_CSTATES_COUNT(handle));
	else
		return (1);
}

void
cpu_acpi_set_register(uint32_t bitreg, uint32_t value)
{
	(void) AcpiWriteBitRegister(bitreg, value);
}

void
cpu_acpi_get_register(uint32_t bitreg, uint32_t *value)
{
	(void) AcpiReadBitRegister(bitreg, value);
}
/*
 * Map the CPU to an ACPI handle for the device and return an initialized
 * cpu_acpi handle.
 */
cpu_acpi_handle_t
cpu_acpi_init(cpu_t *cp)
{
	cpu_acpi_handle_t handle;

	handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);

	if (ACPI_FAILURE(acpica_get_handle_cpu(cp->cpu_id,
	    &handle->cs_handle))) {
		kmem_free(handle, sizeof (cpu_acpi_state_t));
		return (NULL);
	}
	handle->cs_id = cp->cpu_id;
	return (handle);
}

/*
 * Free any resources.
 */
void
cpu_acpi_fini(cpu_acpi_handle_t handle)
{
	kmem_free(handle, sizeof (cpu_acpi_state_t));
}
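
/*
 * Illustrative end-to-end sketch (hypothetical caller, not part of this
 * file's interface): a CPU power-management module would typically create
 * a handle, cache the P-state data, query the supported speeds, and tear
 * everything down again. Error handling is abbreviated.
 */
static void
cpu_acpi_usage_sketch(cpu_t *cp)
{
	cpu_acpi_handle_t handle;
	int *speeds;
	uint_t nspeeds;

	if ((handle = cpu_acpi_init(cp)) == NULL)
		return;

	if (cpu_acpi_cache_pstate_data(handle) == 0) {
		nspeeds = cpu_acpi_get_speeds(handle, &speeds);
		if (nspeeds > 0) {
			/* speeds[0] is the highest level per _PSS ordering */
			cmn_err(CE_NOTE, "!cpu_acpi: CPU %d supports %d "
			    "speeds, top %d", cp->cpu_id, (int)nspeeds,
			    speeds[0]);
			cpu_acpi_free_speeds(speeds, nspeeds);
		}
		cpu_acpi_free_pstate_data(handle);
	}

	cpu_acpi_fini(handle);
}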