4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2016 Joyent, Inc.
26 * Copyright 2016 PALO, Richard.
29 * Copyright (c) 2009-2010, Intel Corporation.
30 * All rights reserved.
33 * ACPI CA OSL for Solaris x86
36 #include <sys/types.h>
39 #include <sys/pci_cfgspace.h>
42 #include <sys/sunddi.h>
43 #include <sys/sunndi.h>
46 #include <sys/taskq.h>
47 #include <sys/strlog.h>
48 #include <sys/x86_archext.h>
50 #include <sys/promif.h>
52 #include <sys/acpi/accommon.h>
53 #include <sys/acpica.h>
55 #define MAX_DAT_FILE_SIZE (64*1024)
58 static int CompressEisaID(char *np
);
60 static void scan_d2a_subtree(dev_info_t
*dip
, ACPI_HANDLE acpiobj
, int bus
);
61 static int acpica_query_bbn_problem(void);
62 static int acpica_find_pcibus(int busno
, ACPI_HANDLE
*rh
);
63 static int acpica_eval_hid(ACPI_HANDLE dev
, char *method
, int *rint
);
64 static ACPI_STATUS
acpica_set_devinfo(ACPI_HANDLE
, dev_info_t
*);
65 static ACPI_STATUS
acpica_unset_devinfo(ACPI_HANDLE
);
66 static void acpica_devinfo_handler(ACPI_HANDLE
, void *);
71 int acpica_eventq_init
= 0;
72 ddi_taskq_t
*osl_eventq
[OSL_EC_BURST_HANDLER
+1];
75 * Priorities relative to minclsyspri that each taskq
76 * run at; OSL_NOTIFY_HANDLER needs to run at a higher
77 * priority than OSL_GPE_HANDLER. There's an implicit
78 * assumption that no priority here results in exceeding
80 * Note: these initializations need to match the order of
83 int osl_eventq_pri_delta
[OSL_EC_BURST_HANDLER
+1] = {
84 0, /* OSL_GLOBAL_LOCK_HANDLER */
85 2, /* OSL_NOTIFY_HANDLER */
86 0, /* OSL_GPE_HANDLER */
87 0, /* OSL_DEBUGGER_THREAD */
88 0, /* OSL_EC_POLL_HANDLER */
89 0 /* OSL_EC_BURST_HANDLER */
93 * Note, if you change this path, you need to update
94 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
96 static char *acpi_table_path
= "/boot/acpi/tables/";
98 /* non-zero while scan_d2a_map() is working */
99 static int scanning_d2a_map
= 0;
100 static int d2a_done
= 0;
102 /* features supported by ACPICA and ACPI device configuration. */
103 uint64_t acpica_core_features
= ACPI_FEATURE_OSI_MODULE
;
104 static uint64_t acpica_devcfg_features
= 0;
106 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
107 int acpica_use_safe_delay
= 0;
109 /* CPU mapping data */
110 struct cpu_map_item
{
111 processorid_t cpu_id
;
117 kmutex_t cpu_map_lock
;
118 static struct cpu_map_item
**cpu_map
= NULL
;
119 static int cpu_map_count_max
= 0;
120 static int cpu_map_count
= 0;
121 static int cpu_map_built
= 0;
124 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
125 * This flag is used to check for uppc-only systems by detecting whether
126 * acpica_map_cpu() has been called or not.
128 static int cpu_map_called
= 0;
130 static int acpi_has_broken_bbn
= -1;
132 /* buffer for AcpiOsVprintf() */
133 #define ACPI_OSL_PR_BUFLEN 1024
134 static char *acpi_osl_pr_buffer
= NULL
;
135 static int acpi_osl_pr_buflen
;
143 discard_event_queues()
148 * destroy event queues
150 for (i
= OSL_GLOBAL_LOCK_HANDLER
; i
<= OSL_EC_BURST_HANDLER
; i
++) {
152 ddi_taskq_destroy(osl_eventq
[i
]);
167 * Initialize event queues
170 /* Always allocate only 1 thread per queue to force FIFO execution */
171 for (i
= OSL_GLOBAL_LOCK_HANDLER
; i
<= OSL_EC_BURST_HANDLER
; i
++) {
172 snprintf(namebuf
, 32, "ACPI%d", i
);
173 osl_eventq
[i
] = ddi_taskq_create(NULL
, namebuf
, 1,
174 osl_eventq_pri_delta
[i
] + minclsyspri
, 0);
175 if (osl_eventq
[i
] == NULL
)
180 discard_event_queues();
182 cmn_err(CE_WARN
, "!acpica: could not initialize event queues");
187 acpica_eventq_init
= 1;
192 * One-time initialization of OSL layer
195 AcpiOsInitialize(void)
198 * Allocate buffer for AcpiOsVprintf() here to avoid
199 * kmem_alloc()/kmem_free() at high PIL
201 acpi_osl_pr_buffer
= kmem_alloc(ACPI_OSL_PR_BUFLEN
, KM_SLEEP
);
202 if (acpi_osl_pr_buffer
!= NULL
)
203 acpi_osl_pr_buflen
= ACPI_OSL_PR_BUFLEN
;
209 * One-time shut-down of OSL layer
212 AcpiOsTerminate(void)
215 if (acpi_osl_pr_buffer
!= NULL
)
216 kmem_free(acpi_osl_pr_buffer
, acpi_osl_pr_buflen
);
218 discard_event_queues();
223 ACPI_PHYSICAL_ADDRESS
224 AcpiOsGetRootPointer()
226 ACPI_PHYSICAL_ADDRESS Address
;
229 * For EFI firmware, the root pointer is defined in EFI systab.
230 * The boot code process the table and put the physical address
231 * in the acpi-root-tab property.
233 Address
= ddi_prop_get_int64(DDI_DEV_T_ANY
, ddi_root_node(),
234 DDI_PROP_DONTPASS
, "acpi-root-tab", 0);
236 if ((Address
== (uintptr_t)NULL
) &&
237 ACPI_FAILURE(AcpiFindRootPointer(&Address
)))
238 Address
= (uintptr_t)NULL
;
245 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES
*InitVal
,
/*
 * Copy at most 'len' characters from 'src' to 'dest' and always
 * NUL-terminate the result; 'dest' must have room for len + 1 bytes.
 * Used to extract fixed-width, possibly unterminated ACPI table
 * header fields (signature, OEM id, OEM table id).
 */
static void
acpica_strncpy(char *dest, const char *src, int len)
{
	/* stop at the source NUL or after len characters, whichever first */
	while ((*dest++ = *src++) && (--len > 0))
		/* copy the string */;
	*dest = '\0';
}
264 AcpiOsTableOverride(ACPI_TABLE_HEADER
*ExistingTable
,
265 ACPI_TABLE_HEADER
**NewTable
)
273 char acpi_table_loc
[128];
275 acpica_strncpy(signature
, ExistingTable
->Signature
, 4);
276 acpica_strncpy(oemid
, ExistingTable
->OemId
, 6);
277 acpica_strncpy(oemtableid
, ExistingTable
->OemTableId
, 8);
279 /* File name format is "signature_oemid_oemtableid.dat" */
280 (void) strcpy(acpi_table_loc
, acpi_table_path
);
281 (void) strcat(acpi_table_loc
, signature
); /* for example, DSDT */
282 (void) strcat(acpi_table_loc
, "_");
283 (void) strcat(acpi_table_loc
, oemid
); /* for example, IntelR */
284 (void) strcat(acpi_table_loc
, "_");
285 (void) strcat(acpi_table_loc
, oemtableid
); /* for example, AWRDACPI */
286 (void) strcat(acpi_table_loc
, ".dat");
288 file
= kobj_open_file(acpi_table_loc
);
289 if (file
== (struct _buf
*)-1) {
293 buf1
= kmem_alloc(MAX_DAT_FILE_SIZE
, KM_SLEEP
);
294 count
= kobj_read_file(file
, buf1
, MAX_DAT_FILE_SIZE
-1, 0);
295 if (count
>= MAX_DAT_FILE_SIZE
) {
296 cmn_err(CE_WARN
, "!acpica: table %s file size too big",
300 buf2
= kmem_alloc(count
, KM_SLEEP
);
301 (void) memcpy(buf2
, buf1
, count
);
302 *NewTable
= (ACPI_TABLE_HEADER
*)buf2
;
303 cmn_err(CE_NOTE
, "!acpica: replacing table: %s",
307 kobj_close_file(file
);
308 kmem_free(buf1
, MAX_DAT_FILE_SIZE
);
314 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER
*ExistingTable
,
315 ACPI_PHYSICAL_ADDRESS
*NewAddress
, UINT32
*NewTableLength
)
321 * ACPI semaphore implementation
335 acpi_sema_init(acpi_sema_t
*sp
, unsigned max
, unsigned count
)
337 mutex_init(&sp
->mutex
, NULL
, MUTEX_DRIVER
, NULL
);
338 cv_init(&sp
->cv
, NULL
, CV_DRIVER
, NULL
);
339 /* no need to enter mutex here at creation */
340 sp
->available
= count
;
349 acpi_sema_destroy(acpi_sema_t
*sp
)
353 mutex_destroy(&sp
->mutex
);
360 acpi_sema_p(acpi_sema_t
*sp
, unsigned count
, uint16_t wait_time
)
362 ACPI_STATUS rv
= AE_OK
;
365 mutex_enter(&sp
->mutex
);
367 if (sp
->available
>= count
) {
369 * Enough units available, no blocking
371 sp
->available
-= count
;
372 mutex_exit(&sp
->mutex
);
374 } else if (wait_time
== 0) {
376 * Not enough units available and timeout
377 * specifies no blocking
380 mutex_exit(&sp
->mutex
);
385 * Not enough units available and timeout specifies waiting
387 if (wait_time
!= ACPI_WAIT_FOREVER
)
388 deadline
= ddi_get_lbolt() +
389 (clock_t)drv_usectohz(wait_time
* 1000);
392 if (wait_time
== ACPI_WAIT_FOREVER
)
393 cv_wait(&sp
->cv
, &sp
->mutex
);
394 else if (cv_timedwait(&sp
->cv
, &sp
->mutex
, deadline
) < 0) {
398 } while (sp
->available
< count
);
400 /* if we dropped out of the wait with AE_OK, we got the units */
402 sp
->available
-= count
;
404 mutex_exit(&sp
->mutex
);
412 acpi_sema_v(acpi_sema_t
*sp
, unsigned count
)
414 mutex_enter(&sp
->mutex
);
415 sp
->available
+= count
;
416 cv_broadcast(&sp
->cv
);
417 mutex_exit(&sp
->mutex
);
422 AcpiOsCreateSemaphore(UINT32 MaxUnits
, UINT32 InitialUnits
,
423 ACPI_HANDLE
*OutHandle
)
427 if ((OutHandle
== NULL
) || (InitialUnits
> MaxUnits
))
428 return (AE_BAD_PARAMETER
);
430 sp
= (acpi_sema_t
*)kmem_alloc(sizeof (acpi_sema_t
), KM_SLEEP
);
431 acpi_sema_init(sp
, MaxUnits
, InitialUnits
);
432 *OutHandle
= (ACPI_HANDLE
)sp
;
438 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle
)
442 return (AE_BAD_PARAMETER
);
444 acpi_sema_destroy((acpi_sema_t
*)Handle
);
445 kmem_free((void *)Handle
, sizeof (acpi_sema_t
));
450 AcpiOsWaitSemaphore(ACPI_HANDLE Handle
, UINT32 Units
, UINT16 Timeout
)
453 if ((Handle
== NULL
) || (Units
< 1))
454 return (AE_BAD_PARAMETER
);
456 return (acpi_sema_p((acpi_sema_t
*)Handle
, Units
, Timeout
));
460 AcpiOsSignalSemaphore(ACPI_HANDLE Handle
, UINT32 Units
)
463 if ((Handle
== NULL
) || (Units
< 1))
464 return (AE_BAD_PARAMETER
);
466 acpi_sema_v((acpi_sema_t
*)Handle
, Units
);
471 AcpiOsCreateLock(ACPI_HANDLE
*OutHandle
)
475 if (OutHandle
== NULL
)
476 return (AE_BAD_PARAMETER
);
478 mp
= kmem_alloc(sizeof (kmutex_t
), KM_SLEEP
);
479 mutex_init(mp
, NULL
, MUTEX_DRIVER
, NULL
);
480 *OutHandle
= (ACPI_HANDLE
)mp
;
485 AcpiOsDeleteLock(ACPI_HANDLE Handle
)
491 mutex_destroy((kmutex_t
*)Handle
);
492 kmem_free((void *)Handle
, sizeof (kmutex_t
));
496 AcpiOsAcquireLock(ACPI_HANDLE Handle
)
501 return (AE_BAD_PARAMETER
);
503 if (curthread
== CPU
->cpu_idle_thread
) {
504 while (!mutex_tryenter((kmutex_t
*)Handle
))
507 mutex_enter((kmutex_t
*)Handle
);
512 AcpiOsReleaseLock(ACPI_HANDLE Handle
, ACPI_CPU_FLAGS Flags
)
514 _NOTE(ARGUNUSED(Flags
))
516 mutex_exit((kmutex_t
*)Handle
);
521 AcpiOsAllocate(ACPI_SIZE Size
)
525 Size
+= sizeof (Size
);
526 tmp_ptr
= (ACPI_SIZE
*)kmem_zalloc(Size
, KM_SLEEP
);
532 AcpiOsFree(void *Memory
)
534 ACPI_SIZE size
, *tmp_ptr
;
536 tmp_ptr
= (ACPI_SIZE
*)Memory
;
539 kmem_free(tmp_ptr
, size
);
542 static int napics_found
; /* number of ioapic addresses in array */
543 static ACPI_PHYSICAL_ADDRESS ioapic_paddr
[MAX_IO_APIC
];
544 static ACPI_TABLE_MADT
*acpi_mapic_dtp
= NULL
;
545 static void *dummy_ioapicadr
;
548 acpica_find_ioapics(void)
550 int madt_seen
, madt_size
;
551 ACPI_SUBTABLE_HEADER
*ap
;
552 ACPI_MADT_IO_APIC
*mia
;
554 if (acpi_mapic_dtp
!= NULL
)
555 return; /* already parsed table */
556 if (AcpiGetTable(ACPI_SIG_MADT
, 1,
557 (ACPI_TABLE_HEADER
**) &acpi_mapic_dtp
) != AE_OK
)
563 * Search the MADT for ioapics
565 ap
= (ACPI_SUBTABLE_HEADER
*) (acpi_mapic_dtp
+ 1);
566 madt_size
= acpi_mapic_dtp
->Header
.Length
;
567 madt_seen
= sizeof (*acpi_mapic_dtp
);
569 while (madt_seen
< madt_size
) {
572 case ACPI_MADT_TYPE_IO_APIC
:
573 mia
= (ACPI_MADT_IO_APIC
*) ap
;
574 if (napics_found
< MAX_IO_APIC
) {
575 ioapic_paddr
[napics_found
++] =
576 (ACPI_PHYSICAL_ADDRESS
)
577 (mia
->Address
& PAGEMASK
);
585 /* advance to next entry */
586 madt_seen
+= ap
->Length
;
587 ap
= (ACPI_SUBTABLE_HEADER
*)(((char *)ap
) + ap
->Length
);
589 if (dummy_ioapicadr
== NULL
)
590 dummy_ioapicadr
= kmem_zalloc(PAGESIZE
, KM_SLEEP
);
595 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress
, ACPI_SIZE Size
)
600 * If the iopaic address table is populated, check if trying
601 * to access an ioapic. Instead, return a pointer to a dummy ioapic.
603 for (i
= 0; i
< napics_found
; i
++) {
604 if ((PhysicalAddress
& PAGEMASK
) == ioapic_paddr
[i
])
605 return (dummy_ioapicadr
);
607 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
608 return (psm_map_new((paddr_t
)PhysicalAddress
,
609 (size_t)Size
, PSM_PROT_WRITE
| PSM_PROT_READ
));
613 AcpiOsUnmapMemory(void *LogicalAddress
, ACPI_SIZE Size
)
616 * Check if trying to unmap dummy ioapic address.
618 if (LogicalAddress
== dummy_ioapicadr
)
621 psm_unmap((caddr_t
)LogicalAddress
, (size_t)Size
);
626 AcpiOsGetPhysicalAddress(void *LogicalAddress
,
627 ACPI_PHYSICAL_ADDRESS
*PhysicalAddress
)
630 /* UNIMPLEMENTED: not invoked by ACPI CA code */
631 return (AE_NOT_IMPLEMENTED
);
635 ACPI_OSD_HANDLER acpi_isr
;
636 void *acpi_isr_context
;
639 acpi_wrapper_isr(char *arg
)
641 _NOTE(ARGUNUSED(arg
))
645 status
= (*acpi_isr
)(acpi_isr_context
);
647 if (status
== ACPI_INTERRUPT_HANDLED
) {
648 return (DDI_INTR_CLAIMED
);
650 return (DDI_INTR_UNCLAIMED
);
654 static int acpi_intr_hooked
= 0;
657 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber
,
658 ACPI_OSD_HANDLER ServiceRoutine
,
661 _NOTE(ARGUNUSED(InterruptNumber
))
667 acpi_isr
= ServiceRoutine
;
668 acpi_isr_context
= Context
;
671 * Get SCI (adjusted for PIC/APIC mode if necessary)
673 if (acpica_get_sci(&sci_vect
, &sci_flags
) != AE_OK
) {
678 cmn_err(CE_NOTE
, "!acpica: attaching SCI %d", sci_vect
);
681 retval
= add_avintr(NULL
, SCI_IPL
, (avfunc
)acpi_wrapper_isr
,
682 "ACPI SCI", sci_vect
, NULL
, NULL
, NULL
, NULL
);
684 acpi_intr_hooked
= 1;
687 return (AE_BAD_PARAMETER
);
691 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber
,
692 ACPI_OSD_HANDLER ServiceRoutine
)
694 _NOTE(ARGUNUSED(ServiceRoutine
))
697 cmn_err(CE_NOTE
, "!acpica: detaching SCI %d", InterruptNumber
);
699 if (acpi_intr_hooked
) {
700 rem_avintr(NULL
, LOCK_LEVEL
- 1, (avfunc
)acpi_wrapper_isr
,
702 acpi_intr_hooked
= 0;
709 AcpiOsGetThreadId(void)
712 * ACPI CA doesn't care what actual value is returned as long
713 * as it is non-zero and unique to each existing thread.
714 * ACPI CA assumes that thread ID is castable to a pointer,
715 * so we use the current thread pointer.
717 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread
));
724 AcpiOsExecute(ACPI_EXECUTE_TYPE Type
, ACPI_OSD_EXEC_CALLBACK Function
,
728 if (!acpica_eventq_init
) {
730 * Create taskqs for event handling
732 if (init_event_queues() != AE_OK
)
736 if (ddi_taskq_dispatch(osl_eventq
[Type
], Function
, Context
,
737 DDI_NOSLEEP
) == DDI_FAILURE
) {
739 cmn_err(CE_WARN
, "!acpica: unable to dispatch event");
749 AcpiOsWaitEventsComplete(void)
754 * Wait for event queues to be empty.
756 for (i
= OSL_GLOBAL_LOCK_HANDLER
; i
<= OSL_EC_BURST_HANDLER
; i
++) {
757 if (osl_eventq
[i
] != NULL
) {
758 ddi_taskq_wait(osl_eventq
[i
]);
764 AcpiOsSleep(ACPI_INTEGER Milliseconds
)
767 * During kernel startup, before the first tick interrupt
768 * has taken place, we can't call delay; very late in
769 * kernel shutdown or suspend/resume, clock interrupts
770 * are blocked, so delay doesn't work then either.
771 * So we busy wait if lbolt == 0 (kernel startup)
772 * or if acpica_use_safe_delay has been set to a
775 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay
)
776 drv_usecwait(Milliseconds
* 1000);
778 ddi_msleep(Milliseconds
);
782 AcpiOsStall(UINT32 Microseconds
)
784 drv_usecwait(Microseconds
);
789 * Implementation of "Windows 2001" compatible I/O permission map
792 #define OSL_IO_NONE (0)
793 #define OSL_IO_READ (1<<0)
794 #define OSL_IO_WRITE (1<<1)
795 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
796 #define OSL_IO_TERM (1<<2)
797 #define OSL_IO_DEFAULT OSL_IO_RW
799 static struct io_perm
{
801 ACPI_IO_ADDRESS high
;
804 { 0xcf8, 0xd00, OSL_IO_TERM
| OSL_IO_RW
}
811 static struct io_perm
*
812 osl_io_find_perm(ACPI_IO_ADDRESS addr
)
818 if ((p
->low
<= addr
) && (addr
<= p
->high
))
820 p
= (p
->perm
& OSL_IO_TERM
) ? NULL
: p
+1;
830 AcpiOsReadPort(ACPI_IO_ADDRESS Address
, UINT32
*Value
, UINT32 Width
)
834 /* verify permission */
835 p
= osl_io_find_perm(Address
);
836 if (p
&& (p
->perm
& OSL_IO_READ
) == 0) {
837 cmn_err(CE_WARN
, "!AcpiOsReadPort: %lx %u not permitted",
838 (long)Address
, Width
);
845 *Value
= inb(Address
);
848 *Value
= inw(Address
);
851 *Value
= inl(Address
);
854 cmn_err(CE_WARN
, "!AcpiOsReadPort: %lx %u failed",
855 (long)Address
, Width
);
856 return (AE_BAD_PARAMETER
);
862 AcpiOsWritePort(ACPI_IO_ADDRESS Address
, UINT32 Value
, UINT32 Width
)
866 /* verify permission */
867 p
= osl_io_find_perm(Address
);
868 if (p
&& (p
->perm
& OSL_IO_WRITE
) == 0) {
869 cmn_err(CE_WARN
, "!AcpiOsWritePort: %lx %u not permitted",
870 (long)Address
, Width
);
876 outb(Address
, Value
);
879 outw(Address
, Value
);
882 outl(Address
, Value
);
885 cmn_err(CE_WARN
, "!AcpiOsWritePort: %lx %u failed",
886 (long)Address
, Width
);
887 return (AE_BAD_PARAMETER
);
897 #define OSL_RW(ptr, val, type, rw) \
898 { if (rw) *((type *)(ptr)) = *((type *) val); \
899 else *((type *) val) = *((type *)(ptr)); }
903 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address
, UINT64
*Value
,
904 UINT32 Width
, int write
)
906 size_t maplen
= Width
/ 8;
909 ptr
= psm_map_new((paddr_t
)Address
, maplen
,
910 PSM_PROT_WRITE
| PSM_PROT_READ
);
914 OSL_RW(ptr
, Value
, uint8_t, write
);
917 OSL_RW(ptr
, Value
, uint16_t, write
);
920 OSL_RW(ptr
, Value
, uint32_t, write
);
923 OSL_RW(ptr
, Value
, uint64_t, write
);
926 cmn_err(CE_WARN
, "!osl_rw_memory: invalid size %d",
931 psm_unmap(ptr
, maplen
);
935 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address
, UINT64
*Value
, UINT32 Width
)
937 osl_rw_memory(Address
, Value
, Width
, 0);
942 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address
, UINT64 Value
, UINT32 Width
)
944 osl_rw_memory(Address
, &Value
, Width
, 1);
950 AcpiOsReadPciConfiguration(ACPI_PCI_ID
*PciId
, UINT32 Reg
,
951 UINT64
*Value
, UINT32 Width
)
956 *Value
= (UINT64
)(*pci_getb_func
)
957 (PciId
->Bus
, PciId
->Device
, PciId
->Function
, Reg
);
960 *Value
= (UINT64
)(*pci_getw_func
)
961 (PciId
->Bus
, PciId
->Device
, PciId
->Function
, Reg
);
964 *Value
= (UINT64
)(*pci_getl_func
)
965 (PciId
->Bus
, PciId
->Device
, PciId
->Function
, Reg
);
969 cmn_err(CE_WARN
, "!AcpiOsReadPciConfiguration: %x %u failed",
971 return (AE_BAD_PARAMETER
);
979 int acpica_write_pci_config_ok
= 1;
982 AcpiOsWritePciConfiguration(ACPI_PCI_ID
*PciId
, UINT32 Reg
,
983 UINT64 Value
, UINT32 Width
)
986 if (!acpica_write_pci_config_ok
) {
987 cmn_err(CE_NOTE
, "!write to PCI cfg %x/%x/%x %x"
988 " %lx %d not permitted", PciId
->Bus
, PciId
->Device
,
989 PciId
->Function
, Reg
, (long)Value
, Width
);
995 (*pci_putb_func
)(PciId
->Bus
, PciId
->Device
, PciId
->Function
,
996 Reg
, (uint8_t)Value
);
999 (*pci_putw_func
)(PciId
->Bus
, PciId
->Device
, PciId
->Function
,
1000 Reg
, (uint16_t)Value
);
1003 (*pci_putl_func
)(PciId
->Bus
, PciId
->Device
, PciId
->Function
,
1004 Reg
, (uint32_t)Value
);
1008 cmn_err(CE_WARN
, "!AcpiOsWritePciConfiguration: %x %u failed",
1010 return (AE_BAD_PARAMETER
);
1016 * Called with ACPI_HANDLEs for both a PCI Config Space
1017 * OpRegion and (what ACPI CA thinks is) the PCI device
1018 * to which this ConfigSpace OpRegion belongs.
1020 * ACPI CA uses _BBN and _ADR objects to determine the default
1021 * values for bus, segment, device and function; anything ACPI CA
1022 * can't figure out from the ACPI tables will be 0. One very
1023 * old 32-bit x86 system is known to have broken _BBN; this is
1024 * not addressed here.
1026 * Some BIOSes implement _BBN() by reading PCI config space
1027 * on bus #0 - which means that we'll recurse when we attempt
1028 * to create the devinfo-to-ACPI map. If Derive is called during
1029 * scan_d2a_map, we don't translate the bus # and return.
1031 * We get the parent of the OpRegion, which must be a PCI
1032 * node, fetch the associated devinfo node and snag the
1036 AcpiOsDerivePciId(ACPI_HANDLE rhandle
, ACPI_HANDLE chandle
,
1037 ACPI_PCI_ID
**PciId
)
1041 int bus
, device
, func
, devfn
;
1044 * See above - avoid recursing during scanning_d2a_map.
1046 if (scanning_d2a_map
)
1050 * Get the OpRegion's parent
1052 if (AcpiGetParent(chandle
, &handle
) != AE_OK
)
1056 * If we've mapped the ACPI node to the devinfo
1057 * tree, use the devinfo reg property
1059 if (ACPI_SUCCESS(acpica_get_devinfo(handle
, &dip
)) &&
1060 (acpica_get_bdf(dip
, &bus
, &device
, &func
) >= 0)) {
1061 (*PciId
)->Bus
= bus
;
1062 (*PciId
)->Device
= device
;
1063 (*PciId
)->Function
= func
;
1070 AcpiOsReadable(void *Pointer
, ACPI_SIZE Length
)
1073 /* Always says yes; all mapped memory assumed readable */
1079 AcpiOsWritable(void *Pointer
, ACPI_SIZE Length
)
1082 /* Always says yes; all mapped memory assumed writable */
1087 AcpiOsGetTimer(void)
1089 /* gethrtime() returns 1nS resolution; convert to 100nS granules */
1090 return ((gethrtime() + 50) / 100);
1093 static struct AcpiOSIFeature_s
{
1094 uint64_t control_flag
;
1095 const char *feature_name
;
1096 } AcpiOSIFeatures
[] = {
1097 { ACPI_FEATURE_OSI_MODULE
, "Module Device" },
1098 { 0, "Processor Device" }
1103 AcpiOsValidateInterface(char *feature
)
1107 ASSERT(feature
!= NULL
);
1108 for (i
= 0; i
< sizeof (AcpiOSIFeatures
) / sizeof (AcpiOSIFeatures
[0]);
1110 if (strcmp(feature
, AcpiOSIFeatures
[i
].feature_name
) != 0) {
1113 /* Check whether required core features are available. */
1114 if (AcpiOSIFeatures
[i
].control_flag
!= 0 &&
1115 acpica_get_core_feature(AcpiOSIFeatures
[i
].control_flag
) !=
1116 AcpiOSIFeatures
[i
].control_flag
) {
1119 /* Feature supported. */
1123 return (AE_SUPPORT
);
1128 AcpiOsValidateAddress(UINT8 spaceid
, ACPI_PHYSICAL_ADDRESS addr
,
1135 AcpiOsSignal(UINT32 Function
, void *Info
)
1137 _NOTE(ARGUNUSED(Function
, Info
))
1139 /* FUTUREWORK: debugger support */
1141 cmn_err(CE_NOTE
, "!OsSignal unimplemented");
1145 void ACPI_INTERNAL_VAR_XFACE
1146 AcpiOsPrintf(const char *Format
, ...)
1150 va_start(ap
, Format
);
1151 AcpiOsVprintf(Format
, ap
);
1156 * When != 0, sends output to console
1157 * Patchable with kmdb or /etc/system.
1159 int acpica_console_out
= 0;
1161 #define ACPICA_OUTBUF_LEN 160
1162 char acpica_outbuf
[ACPICA_OUTBUF_LEN
];
1163 int acpica_outbuf_offset
;
1169 acpica_pr_buf(char *buf
)
1171 char c
, *bufp
, *outp
;
1175 * copy the supplied buffer into the output buffer
1176 * when we hit a '\n' or overflow the output buffer,
1177 * output and reset the output buffer
1180 outp
= acpica_outbuf
+ acpica_outbuf_offset
;
1181 out_remaining
= ACPICA_OUTBUF_LEN
- acpica_outbuf_offset
- 1;
1182 while (c
= *bufp
++) {
1184 if (c
== '\n' || --out_remaining
== 0) {
1186 switch (acpica_console_out
) {
1188 printf(acpica_outbuf
);
1191 prom_printf(acpica_outbuf
);
1195 (void) strlog(0, 0, 0,
1196 SL_CONSOLE
| SL_NOTE
| SL_LOGONLY
,
1200 acpica_outbuf_offset
= 0;
1201 outp
= acpica_outbuf
;
1202 out_remaining
= ACPICA_OUTBUF_LEN
- 1;
1206 acpica_outbuf_offset
= outp
- acpica_outbuf
;
1210 AcpiOsVprintf(const char *Format
, va_list Args
)
1214 * If AcpiOsInitialize() failed to allocate a string buffer,
1215 * resort to vprintf().
1217 if (acpi_osl_pr_buffer
== NULL
) {
1218 vprintf(Format
, Args
);
1223 * It is possible that a very long debug output statement will
1224 * be truncated; this is silently ignored.
1226 (void) vsnprintf(acpi_osl_pr_buffer
, acpi_osl_pr_buflen
, Format
, Args
);
1227 acpica_pr_buf(acpi_osl_pr_buffer
);
1231 AcpiOsRedirectOutput(void *Destination
)
1233 _NOTE(ARGUNUSED(Destination
))
1235 /* FUTUREWORK: debugger support */
1238 cmn_err(CE_WARN
, "!acpica: AcpiOsRedirectOutput called");
1244 AcpiOsGetLine(char *Buffer
, UINT32 len
, UINT32
*BytesRead
)
1246 _NOTE(ARGUNUSED(Buffer
))
1247 _NOTE(ARGUNUSED(len
))
1248 _NOTE(ARGUNUSED(BytesRead
))
1250 /* FUTUREWORK: debugger support */
1256 * Device tree binding
1259 acpica_find_pcibus_walker(ACPI_HANDLE hdl
, UINT32 lvl
, void *ctxp
, void **rvpp
)
1261 _NOTE(ARGUNUSED(lvl
));
1264 int busno
= (intptr_t)ctxp
;
1265 ACPI_HANDLE
*hdlp
= (ACPI_HANDLE
*)rvpp
;
1267 /* Check whether device exists. */
1268 if (ACPI_SUCCESS(acpica_eval_int(hdl
, "_STA", &sta
)) &&
1269 !(sta
& (ACPI_STA_DEVICE_PRESENT
| ACPI_STA_DEVICE_FUNCTIONING
))) {
1271 * Skip object if device doesn't exist.
1272 * According to ACPI Spec,
1273 * 1) setting either bit 0 or bit 3 means that device exists.
1274 * 2) Absence of _STA method means all status bits set.
1276 return (AE_CTRL_DEPTH
);
1279 if (ACPI_FAILURE(acpica_eval_hid(hdl
, "_HID", &hid
)) ||
1280 (hid
!= HID_PCI_BUS
&& hid
!= HID_PCI_EXPRESS_BUS
)) {
1281 /* Non PCI/PCIe host bridge. */
1285 if (acpi_has_broken_bbn
) {
1288 rb
.Length
= ACPI_ALLOCATE_BUFFER
;
1290 /* Decree _BBN == n from PCI<n> */
1291 if (AcpiGetName(hdl
, ACPI_SINGLE_NAME
, &rb
) != AE_OK
) {
1292 return (AE_CTRL_TERMINATE
);
1294 bbn
= ((char *)rb
.Pointer
)[3] - '0';
1295 AcpiOsFree(rb
.Pointer
);
1296 if (bbn
== busno
|| busno
== 0) {
1298 return (AE_CTRL_TERMINATE
);
1300 } else if (ACPI_SUCCESS(acpica_eval_int(hdl
, "_BBN", &bbn
))) {
1303 return (AE_CTRL_TERMINATE
);
1305 } else if (busno
== 0) {
1307 return (AE_CTRL_TERMINATE
);
1310 return (AE_CTRL_DEPTH
);
1314 acpica_find_pcibus(int busno
, ACPI_HANDLE
*rh
)
1316 ACPI_HANDLE sbobj
, busobj
;
1318 /* initialize static flag by querying ACPI namespace for bug */
1319 if (acpi_has_broken_bbn
== -1)
1320 acpi_has_broken_bbn
= acpica_query_bbn_problem();
1322 if (ACPI_SUCCESS(AcpiGetHandle(NULL
, "\\_SB", &sbobj
))) {
1324 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE
, sbobj
, UINT32_MAX
,
1325 acpica_find_pcibus_walker
, NULL
, (void *)(intptr_t)busno
,
1327 if (busobj
!= NULL
) {
1337 acpica_query_bbn_walker(ACPI_HANDLE hdl
, UINT32 lvl
, void *ctxp
, void **rvpp
)
1339 _NOTE(ARGUNUSED(lvl
));
1340 _NOTE(ARGUNUSED(rvpp
));
1343 int *cntp
= (int *)ctxp
;
1345 /* Check whether device exists. */
1346 if (ACPI_SUCCESS(acpica_eval_int(hdl
, "_STA", &sta
)) &&
1347 !(sta
& (ACPI_STA_DEVICE_PRESENT
| ACPI_STA_DEVICE_FUNCTIONING
))) {
1349 * Skip object if device doesn't exist.
1350 * According to ACPI Spec,
1351 * 1) setting either bit 0 or bit 3 means that device exists.
1352 * 2) Absence of _STA method means all status bits set.
1354 return (AE_CTRL_DEPTH
);
1357 if (ACPI_FAILURE(acpica_eval_hid(hdl
, "_HID", &hid
)) ||
1358 (hid
!= HID_PCI_BUS
&& hid
!= HID_PCI_EXPRESS_BUS
)) {
1359 /* Non PCI/PCIe host bridge. */
1361 } else if (ACPI_SUCCESS(acpica_eval_int(hdl
, "_BBN", &bbn
)) &&
1362 bbn
== 0 && ++(*cntp
) > 1) {
1364 * If we find more than one bus with a 0 _BBN
1365 * we have the problem that BigBear's BIOS shows
1367 return (AE_CTRL_TERMINATE
);
1370 * Skip children of PCI/PCIe host bridge.
1372 return (AE_CTRL_DEPTH
);
1377 * Look for ACPI problem where _BBN is zero for multiple PCI buses
1378 * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1379 * below if it exists.
1382 acpica_query_bbn_problem(void)
1389 if (ACPI_SUCCESS(AcpiGetHandle(NULL
, "\\_SB", &sbobj
))) {
1390 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE
, sbobj
, UINT32_MAX
,
1391 acpica_query_bbn_walker
, NULL
, &zerobbncnt
, &rv
);
1394 return (zerobbncnt
> 1 ? 1 : 0);
static const char hextab[] = "0123456789ABCDEF";

/*
 * Get hex digit:
 *
 * Returns the 4-bit hex digit named by the input character.  Returns
 * zero if the input character is not valid hex!
 */
static int
hexdig(int c)
{
	/* fold 'a'-'z' to upper case by subtracting 0x20 (' ') */
	int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
	int j = sizeof (hextab);

	/* linear search; index 0 doubles as the "not found" result */
	while (--j && (x != hextab[j])) {
	}
	return (j);
}

/*
 * Compress an EISA device name:
 *
 * This routine converts a 7-byte ASCII device name into the 4-byte
 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
 * NV-RAM!)
 */
static int
CompressEisaID(char *np)
{
	union { char octets[4]; int retval; } myu;

	/* manufacturer code: 3 letters packed 5 bits each into 2 bytes */
	myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
	myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
	/* product/revision: 4 hex digits packed two per byte */
	myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
	myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);

	return (myu.retval);
}
1439 acpica_eval_int(ACPI_HANDLE dev
, char *method
, int *rint
)
1446 rb
.Length
= sizeof (ro
);
1447 if ((status
= AcpiEvaluateObjectTyped(dev
, method
, NULL
, &rb
,
1448 ACPI_TYPE_INTEGER
)) == AE_OK
)
1449 *rint
= ro
.Integer
.Value
;
1455 acpica_eval_hid(ACPI_HANDLE dev
, char *method
, int *rint
)
1461 rb
.Length
= ACPI_ALLOCATE_BUFFER
;
1462 if (AcpiEvaluateObject(dev
, method
, NULL
, &rb
) == AE_OK
&&
1465 if (rv
->Type
== ACPI_TYPE_INTEGER
) {
1466 *rint
= rv
->Integer
.Value
;
1469 } else if (rv
->Type
== ACPI_TYPE_STRING
) {
1472 /* Convert the string into an EISA ID */
1473 if (rv
->String
.Pointer
== NULL
) {
1478 stringData
= rv
->String
.Pointer
;
1481 * If the string is an EisaID, it must be 7
1482 * characters; if it's an ACPI ID, it will be 8
1483 * (and we don't care about ACPI ids here).
1485 if (strlen(stringData
) != 7) {
1490 *rint
= CompressEisaID(stringData
);
1500 * Create linkage between devinfo nodes and ACPI nodes
1503 acpica_tag_devinfo(dev_info_t
*dip
, ACPI_HANDLE acpiobj
)
1509 * Tag the devinfo node with the ACPI name
1512 rb
.Length
= ACPI_ALLOCATE_BUFFER
;
1513 status
= AcpiGetName(acpiobj
, ACPI_FULL_PATHNAME
, &rb
);
1514 if (ACPI_FAILURE(status
)) {
1515 cmn_err(CE_WARN
, "acpica: could not get ACPI path!");
1517 (void) ndi_prop_update_string(DDI_DEV_T_NONE
, dip
,
1518 "acpi-namespace", (char *)rb
.Pointer
);
1519 AcpiOsFree(rb
.Pointer
);
1522 * Tag the ACPI node with the dip
1524 status
= acpica_set_devinfo(acpiobj
, dip
);
1525 ASSERT(ACPI_SUCCESS(status
));
1532 * Destroy linkage between devinfo nodes and ACPI nodes
1535 acpica_untag_devinfo(dev_info_t
*dip
, ACPI_HANDLE acpiobj
)
1537 (void) acpica_unset_devinfo(acpiobj
);
1538 (void) ndi_prop_remove(DDI_DEV_T_NONE
, dip
, "acpi-namespace");
1544 * Return the ACPI device node matching the CPU dev_info node.
1547 acpica_get_handle_cpu(int cpu_id
, ACPI_HANDLE
*rh
)
1552 * if cpu_map itself is NULL, we're a uppc system and
1553 * acpica_build_processor_map() hasn't been called yet.
1556 if (cpu_map
== NULL
) {
1557 (void) acpica_build_processor_map();
1558 if (cpu_map
== NULL
)
1567 * search object with cpuid in cpu_map
1569 mutex_enter(&cpu_map_lock
);
1570 for (i
= 0; i
< cpu_map_count
; i
++) {
1571 if (cpu_map
[i
]->cpu_id
== cpu_id
) {
1575 if (i
< cpu_map_count
&& (cpu_map
[i
]->obj
!= NULL
)) {
1576 *rh
= cpu_map
[i
]->obj
;
1577 mutex_exit(&cpu_map_lock
);
1581 /* Handle special case for uppc-only systems. */
1582 if (cpu_map_called
== 0) {
1583 uint32_t apicid
= cpuid_get_apicid(CPU
);
1584 if (apicid
!= UINT32_MAX
) {
1585 for (i
= 0; i
< cpu_map_count
; i
++) {
1586 if (cpu_map
[i
]->apic_id
== apicid
) {
1590 if (i
< cpu_map_count
&& (cpu_map
[i
]->obj
!= NULL
)) {
1591 *rh
= cpu_map
[i
]->obj
;
1592 mutex_exit(&cpu_map_lock
);
1597 mutex_exit(&cpu_map_lock
);
1603 * Determine if this object is a processor
1606 acpica_probe_processor(ACPI_HANDLE obj
, UINT32 level
, void *ctx
, void **rv
)
1609 ACPI_OBJECT_TYPE objtype
;
1610 unsigned long acpi_id
;
1612 ACPI_DEVICE_INFO
*di
;
1614 if (AcpiGetType(obj
, &objtype
) != AE_OK
)
1617 if (objtype
== ACPI_TYPE_PROCESSOR
) {
1618 /* process a Processor */
1620 rb
.Length
= ACPI_ALLOCATE_BUFFER
;
1621 status
= AcpiEvaluateObjectTyped(obj
, NULL
, NULL
, &rb
,
1622 ACPI_TYPE_PROCESSOR
);
1623 if (status
!= AE_OK
) {
1624 cmn_err(CE_WARN
, "!acpica: error probing Processor");
1627 acpi_id
= ((ACPI_OBJECT
*)rb
.Pointer
)->Processor
.ProcId
;
1628 AcpiOsFree(rb
.Pointer
);
1629 } else if (objtype
== ACPI_TYPE_DEVICE
) {
1630 /* process a processor Device */
1631 status
= AcpiGetObjectInfo(obj
, &di
);
1632 if (status
!= AE_OK
) {
1634 "!acpica: error probing Processor Device\n");
1638 if (!(di
->Valid
& ACPI_VALID_UID
) ||
1639 ddi_strtoul(di
->UniqueId
.String
, NULL
, 10, &acpi_id
) != 0) {
1642 "!acpica: error probing Processor Device _UID\n");
1647 (void) acpica_add_processor_to_map(acpi_id
, obj
, UINT32_MAX
);
1655 dev_info_t
*dip
, *cdip
;
1656 ACPI_HANDLE acpiobj
;
1657 char *device_type_prop
;
1659 static int map_error
= 0;
1661 if (map_error
|| (d2a_done
!= 0))
1664 scanning_d2a_map
= 1;
1667 * Find all child-of-root PCI buses, and find their corresponding
1668 * ACPI child-of-root PCI nodes. For each one, add to the
1672 for (dip
= ddi_get_child(ddi_root_node());
1674 dip
= ddi_get_next_sibling(dip
)) {
1676 /* prune non-PCI nodes */
1677 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dip
,
1679 "device_type", &device_type_prop
) != DDI_PROP_SUCCESS
)
1682 if ((strcmp("pci", device_type_prop
) != 0) &&
1683 (strcmp("pciex", device_type_prop
) != 0)) {
1684 ddi_prop_free(device_type_prop
);
1688 ddi_prop_free(device_type_prop
);
1691 * To get bus number of dip, get first child and get its
1692 * bus number. If NULL, just continue, because we don't
1693 * care about bus nodes with no children anyway.
1695 if ((cdip
= ddi_get_child(dip
)) == NULL
)
1698 if (acpica_get_bdf(cdip
, &bus
, NULL
, NULL
) < 0) {
1700 cmn_err(CE_WARN
, "Can't get bus number of PCI child?");
1703 scanning_d2a_map
= 0;
1708 if (acpica_find_pcibus(bus
, &acpiobj
) == AE_ERROR
) {
1710 cmn_err(CE_WARN
, "No ACPI bus obj for bus %d?\n", bus
);
1716 acpica_tag_devinfo(dip
, acpiobj
);
1718 /* call recursively to enumerate subtrees */
1719 scan_d2a_subtree(dip
, acpiobj
, bus
);
1722 scanning_d2a_map
= 0;
1727 * For all acpi child devices of acpiobj, find their matching
1728 * dip under "dip" argument. (matching means "matches dev/fn").
1729 * bus is assumed to already be a match from caller, and is
1730 * used here only to record in the d2a entry. Recurse if necessary.
1733 scan_d2a_subtree(dev_info_t
*dip
, ACPI_HANDLE acpiobj
, int bus
)
1735 int acpi_devfn
, hid
;
1738 int dcld_b
, dcld_d
, dcld_f
;
1740 char *device_type_prop
;
1743 while (AcpiGetNextObject(ACPI_TYPE_DEVICE
, acpiobj
, acld
, &acld
)
1745 /* get the dev/func we're looking for in the devinfo tree */
1746 if (acpica_eval_int(acld
, "_ADR", &acpi_devfn
) != AE_OK
)
1748 dev
= (acpi_devfn
>> 16) & 0xFFFF;
1749 func
= acpi_devfn
& 0xFFFF;
1751 /* look through all the immediate children of dip */
1752 for (dcld
= ddi_get_child(dip
); dcld
!= NULL
;
1753 dcld
= ddi_get_next_sibling(dcld
)) {
1754 if (acpica_get_bdf(dcld
, &dcld_b
, &dcld_d
, &dcld_f
) < 0)
1757 /* dev must match; function must match or wildcard */
1758 if (dcld_d
!= dev
||
1759 (func
!= 0xFFFF && func
!= dcld_f
))
1763 /* found a match, record it */
1764 acpica_tag_devinfo(dcld
, acld
);
1766 /* if we find a bridge, recurse from here */
1767 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dcld
,
1768 DDI_PROP_DONTPASS
, "device_type",
1769 &device_type_prop
) == DDI_PROP_SUCCESS
) {
1770 if ((strcmp("pci", device_type_prop
) == 0) ||
1771 (strcmp("pciex", device_type_prop
) == 0))
1772 scan_d2a_subtree(dcld
, acld
, bus
);
1773 ddi_prop_free(device_type_prop
);
1776 /* done finding a match, so break now */
1783 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1786 acpica_get_bdf(dev_info_t
*dip
, int *bus
, int *device
, int *func
)
1788 pci_regspec_t
*pci_rp
;
1791 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
1792 "reg", (int **)&pci_rp
, (uint_t
*)&len
) != DDI_SUCCESS
)
1795 if (len
< (sizeof (pci_regspec_t
) / sizeof (int))) {
1796 ddi_prop_free(pci_rp
);
1800 *bus
= (int)PCI_REG_BUS_G(pci_rp
->pci_phys_hi
);
1802 *device
= (int)PCI_REG_DEV_G(pci_rp
->pci_phys_hi
);
1804 *func
= (int)PCI_REG_FUNC_G(pci_rp
->pci_phys_hi
);
1805 ddi_prop_free(pci_rp
);
1810 * Return the ACPI device node matching this dev_info node, if it
1811 * exists in the ACPI tree.
1814 acpica_get_handle(dev_info_t
*dip
, ACPI_HANDLE
*rh
)
1821 cmn_err(CE_WARN
, "!acpica_get_handle:"
1822 " no ACPI mapping for %s", ddi_node_name(dip
));
1825 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
1826 "acpi-namespace", &acpiname
) != DDI_PROP_SUCCESS
) {
1830 status
= AcpiGetHandle(NULL
, acpiname
, rh
);
1831 ddi_prop_free((void *)acpiname
);
1838 * Manage OS data attachment to ACPI nodes
1842 * Return the (dev_info_t *) associated with the ACPI node.
1845 acpica_get_devinfo(ACPI_HANDLE obj
, dev_info_t
**dipp
)
1850 status
= AcpiGetData(obj
, acpica_devinfo_handler
, &ptr
);
1851 if (status
== AE_OK
)
1852 *dipp
= (dev_info_t
*)ptr
;
1858 * Set the dev_info_t associated with the ACPI node.
1861 acpica_set_devinfo(ACPI_HANDLE obj
, dev_info_t
*dip
)
1865 status
= AcpiAttachData(obj
, acpica_devinfo_handler
, (void *)dip
);
1870 * Unset the dev_info_t associated with the ACPI node.
1873 acpica_unset_devinfo(ACPI_HANDLE obj
)
1875 return (AcpiDetachData(obj
, acpica_devinfo_handler
));
1882 acpica_devinfo_handler(ACPI_HANDLE obj
, void *data
)
1888 acpica_build_processor_map(void)
1894 * shouldn't be called more than once anyway
1900 * ACPI device configuration driver has built mapping information
1901 * among processor id and object handle, no need to probe again.
1903 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU
)) {
1909 * Look for Processor objects
1911 status
= AcpiWalkNamespace(ACPI_TYPE_PROCESSOR
,
1914 acpica_probe_processor
,
1918 ASSERT(status
== AE_OK
);
1921 * Look for processor Device objects
1923 status
= AcpiGetDevices("ACPI0007",
1924 acpica_probe_processor
,
1927 ASSERT(status
== AE_OK
);
1934 * Grow cpu map table on demand.
1937 acpica_grow_cpu_map(void)
1939 if (cpu_map_count
== cpu_map_count_max
) {
1941 struct cpu_map_item
**new_map
;
1943 ASSERT(cpu_map_count_max
< INT_MAX
/ 2);
1944 cpu_map_count_max
+= max_ncpus
;
1945 new_map
= kmem_zalloc(sizeof (cpu_map
[0]) * cpu_map_count_max
,
1947 if (cpu_map_count
!= 0) {
1948 ASSERT(cpu_map
!= NULL
);
1949 sz
= sizeof (cpu_map
[0]) * cpu_map_count
;
1950 kcopy(cpu_map
, new_map
, sz
);
1951 kmem_free(cpu_map
, sz
);
1958 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1959 * ACPI handle). The mapping table will be setup in two steps:
1960 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1961 * processor id and ACPI object handle.
1962 * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1963 * On systems with which have ACPI device configuration for CPUs enabled,
1964 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1965 * otherwise acpica_map_cpu() will be called before
1966 * acpica_add_processor_to_map().
1969 acpica_add_processor_to_map(UINT32 acpi_id
, ACPI_HANDLE obj
, UINT32 apic_id
)
1972 ACPI_STATUS rc
= AE_OK
;
1973 struct cpu_map_item
*item
= NULL
;
1975 ASSERT(obj
!= NULL
);
1980 mutex_enter(&cpu_map_lock
);
1983 * Special case for uppc
1984 * If we're a uppc system and ACPI device configuration for CPU has
1985 * been disabled, there won't be a CPU map yet because uppc psm doesn't
1986 * call acpica_map_cpu(). So create one and use the passed-in processor
1988 * Assumption: the first CPU returned by
1989 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
1990 * Unfortunately there appears to be no good way to ASSERT this.
1992 if (cpu_map
== NULL
&&
1993 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU
)) {
1994 acpica_grow_cpu_map();
1995 ASSERT(cpu_map
!= NULL
);
1996 item
= kmem_zalloc(sizeof (*item
), KM_SLEEP
);
1998 item
->proc_id
= acpi_id
;
1999 item
->apic_id
= apic_id
;
2003 mutex_exit(&cpu_map_lock
);
2007 for (i
= 0; i
< cpu_map_count
; i
++) {
2008 if (cpu_map
[i
]->obj
== obj
) {
2009 rc
= AE_ALREADY_EXISTS
;
2011 } else if (cpu_map
[i
]->proc_id
== acpi_id
) {
2012 ASSERT(item
== NULL
);
2020 * ACPI alias objects may cause more than one objects
2021 * with the same ACPI processor id, only remember the
2022 * the first object encountered.
2024 if (item
->obj
== NULL
) {
2026 item
->apic_id
= apic_id
;
2028 rc
= AE_ALREADY_EXISTS
;
2030 } else if (cpu_map_count
>= INT_MAX
/ 2) {
2033 acpica_grow_cpu_map();
2034 ASSERT(cpu_map
!= NULL
);
2035 ASSERT(cpu_map_count
< cpu_map_count_max
);
2036 item
= kmem_zalloc(sizeof (*item
), KM_SLEEP
);
2038 item
->proc_id
= acpi_id
;
2039 item
->apic_id
= apic_id
;
2041 cpu_map
[cpu_map_count
] = item
;
2046 mutex_exit(&cpu_map_lock
);
2052 acpica_remove_processor_from_map(UINT32 acpi_id
)
2055 ACPI_STATUS rc
= AE_NOT_EXIST
;
2057 mutex_enter(&cpu_map_lock
);
2058 for (i
= 0; i
< cpu_map_count
; i
++) {
2059 if (cpu_map
[i
]->proc_id
!= acpi_id
) {
2062 cpu_map
[i
]->obj
= NULL
;
2063 /* Free item if no more reference to it. */
2064 if (cpu_map
[i
]->cpu_id
== -1) {
2065 kmem_free(cpu_map
[i
], sizeof (struct cpu_map_item
));
2068 if (i
!= cpu_map_count
) {
2069 cpu_map
[i
] = cpu_map
[cpu_map_count
];
2070 cpu_map
[cpu_map_count
] = NULL
;
2076 mutex_exit(&cpu_map_lock
);
2082 acpica_map_cpu(processorid_t cpuid
, UINT32 acpi_id
)
2085 ACPI_STATUS rc
= AE_OK
;
2086 struct cpu_map_item
*item
= NULL
;
2088 ASSERT(cpuid
!= -1);
2093 mutex_enter(&cpu_map_lock
);
2095 for (i
= 0; i
< cpu_map_count
; i
++) {
2096 if (cpu_map
[i
]->cpu_id
== cpuid
) {
2097 rc
= AE_ALREADY_EXISTS
;
2099 } else if (cpu_map
[i
]->proc_id
== acpi_id
) {
2100 ASSERT(item
== NULL
);
2106 if (item
->cpu_id
== -1) {
2107 item
->cpu_id
= cpuid
;
2109 rc
= AE_ALREADY_EXISTS
;
2111 } else if (cpu_map_count
>= INT_MAX
/ 2) {
2114 acpica_grow_cpu_map();
2115 ASSERT(cpu_map
!= NULL
);
2116 ASSERT(cpu_map_count
< cpu_map_count_max
);
2117 item
= kmem_zalloc(sizeof (*item
), KM_SLEEP
);
2118 item
->cpu_id
= cpuid
;
2119 item
->proc_id
= acpi_id
;
2120 item
->apic_id
= UINT32_MAX
;
2122 cpu_map
[cpu_map_count
] = item
;
2126 mutex_exit(&cpu_map_lock
);
2132 acpica_unmap_cpu(processorid_t cpuid
)
2135 ACPI_STATUS rc
= AE_NOT_EXIST
;
2137 ASSERT(cpuid
!= -1);
2142 mutex_enter(&cpu_map_lock
);
2143 for (i
= 0; i
< cpu_map_count
; i
++) {
2144 if (cpu_map
[i
]->cpu_id
!= cpuid
) {
2147 cpu_map
[i
]->cpu_id
= -1;
2148 /* Free item if no more reference. */
2149 if (cpu_map
[i
]->obj
== NULL
) {
2150 kmem_free(cpu_map
[i
], sizeof (struct cpu_map_item
));
2153 if (i
!= cpu_map_count
) {
2154 cpu_map
[i
] = cpu_map
[cpu_map_count
];
2155 cpu_map
[cpu_map_count
] = NULL
;
2161 mutex_exit(&cpu_map_lock
);
2167 acpica_get_cpu_object_by_cpuid(processorid_t cpuid
, ACPI_HANDLE
*hdlp
)
2170 ACPI_STATUS rc
= AE_NOT_EXIST
;
2172 ASSERT(cpuid
!= -1);
2177 mutex_enter(&cpu_map_lock
);
2178 for (i
= 0; i
< cpu_map_count
; i
++) {
2179 if (cpu_map
[i
]->cpu_id
== cpuid
&& cpu_map
[i
]->obj
!= NULL
) {
2180 *hdlp
= cpu_map
[i
]->obj
;
2185 mutex_exit(&cpu_map_lock
);
2191 acpica_get_cpu_object_by_procid(UINT32 procid
, ACPI_HANDLE
*hdlp
)
2194 ACPI_STATUS rc
= AE_NOT_EXIST
;
2196 mutex_enter(&cpu_map_lock
);
2197 for (i
= 0; i
< cpu_map_count
; i
++) {
2198 if (cpu_map
[i
]->proc_id
== procid
&& cpu_map
[i
]->obj
!= NULL
) {
2199 *hdlp
= cpu_map
[i
]->obj
;
2204 mutex_exit(&cpu_map_lock
);
2210 acpica_get_cpu_object_by_apicid(UINT32 apicid
, ACPI_HANDLE
*hdlp
)
2213 ACPI_STATUS rc
= AE_NOT_EXIST
;
2215 ASSERT(apicid
!= UINT32_MAX
);
2216 if (apicid
== UINT32_MAX
) {
2220 mutex_enter(&cpu_map_lock
);
2221 for (i
= 0; i
< cpu_map_count
; i
++) {
2222 if (cpu_map
[i
]->apic_id
== apicid
&& cpu_map
[i
]->obj
!= NULL
) {
2223 *hdlp
= cpu_map
[i
]->obj
;
2228 mutex_exit(&cpu_map_lock
);
2234 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl
, processorid_t
*cpuidp
)
2237 ACPI_STATUS rc
= AE_NOT_EXIST
;
2239 ASSERT(cpuidp
!= NULL
);
2240 if (hdl
== NULL
|| cpuidp
== NULL
) {
2245 mutex_enter(&cpu_map_lock
);
2246 for (i
= 0; i
< cpu_map_count
; i
++) {
2247 if (cpu_map
[i
]->obj
== hdl
&& cpu_map
[i
]->cpu_id
!= -1) {
2248 *cpuidp
= cpu_map
[i
]->cpu_id
;
2253 mutex_exit(&cpu_map_lock
);
2259 acpica_get_apicid_by_object(ACPI_HANDLE hdl
, UINT32
*rp
)
2262 ACPI_STATUS rc
= AE_NOT_EXIST
;
2265 if (hdl
== NULL
|| rp
== NULL
) {
2270 mutex_enter(&cpu_map_lock
);
2271 for (i
= 0; i
< cpu_map_count
; i
++) {
2272 if (cpu_map
[i
]->obj
== hdl
&&
2273 cpu_map
[i
]->apic_id
!= UINT32_MAX
) {
2274 *rp
= cpu_map
[i
]->apic_id
;
2279 mutex_exit(&cpu_map_lock
);
2285 acpica_get_procid_by_object(ACPI_HANDLE hdl
, UINT32
*rp
)
2288 ACPI_STATUS rc
= AE_NOT_EXIST
;
2291 if (hdl
== NULL
|| rp
== NULL
) {
2296 mutex_enter(&cpu_map_lock
);
2297 for (i
= 0; i
< cpu_map_count
; i
++) {
2298 if (cpu_map
[i
]->obj
== hdl
) {
2299 *rp
= cpu_map
[i
]->proc_id
;
2304 mutex_exit(&cpu_map_lock
);
2310 acpica_set_core_feature(uint64_t features
)
2312 atomic_or_64(&acpica_core_features
, features
);
2316 acpica_clear_core_feature(uint64_t features
)
2318 atomic_and_64(&acpica_core_features
, ~features
);
2322 acpica_get_core_feature(uint64_t features
)
2324 return (acpica_core_features
& features
);
2328 acpica_set_devcfg_feature(uint64_t features
)
2330 atomic_or_64(&acpica_devcfg_features
, features
);
2334 acpica_clear_devcfg_feature(uint64_t features
)
2336 atomic_and_64(&acpica_devcfg_features
, ~features
);
2340 acpica_get_devcfg_feature(uint64_t features
)
2342 return (acpica_devcfg_features
& features
);
2346 acpica_get_global_FADT(ACPI_TABLE_FADT
**gbl_FADT
)
2348 *gbl_FADT
= &AcpiGbl_FADT
;
2352 acpica_write_cpupm_capabilities(boolean_t pstates
, boolean_t cstates
)
2354 if (pstates
&& AcpiGbl_FADT
.PstateControl
!= 0)
2355 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK
,
2356 AcpiGbl_FADT
.PstateControl
);
2358 if (cstates
&& AcpiGbl_FADT
.CstControl
!= 0)
2359 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK
,
2360 AcpiGbl_FADT
.CstControl
);
2364 acpi_strtoul(const char *str
, char **ep
, int base
)
2368 if (ddi_strtoul(str
, ep
, base
, &v
) != 0 || v
> ACPI_UINT32_MAX
) {
2369 return (ACPI_UINT32_MAX
);
2372 return ((uint32_t)v
);