[unleashed/tickless.git] usr/src/uts/intel/io/acpica/osl.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2016 Joyent, Inc.
26 * Copyright 2016 PALO, Richard.
29 * Copyright (c) 2009-2010, Intel Corporation.
30 * All rights reserved.
33 * ACPI CA OSL for Solaris x86
36 #include <sys/types.h>
37 #include <sys/kmem.h>
38 #include <sys/psm.h>
39 #include <sys/pci_cfgspace.h>
40 #include <sys/apic.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43 #include <sys/sunndi.h>
44 #include <sys/pci.h>
45 #include <sys/kobj.h>
46 #include <sys/taskq.h>
47 #include <sys/strlog.h>
48 #include <sys/x86_archext.h>
49 #include <sys/note.h>
50 #include <sys/promif.h>
52 #include <sys/acpi/accommon.h>
53 #include <sys/acpica.h>
55 #define MAX_DAT_FILE_SIZE (64*1024)
57 /* local functions */
58 static int CompressEisaID(char *np);
60 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
61 static int acpica_query_bbn_problem(void);
62 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
63 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
64 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
65 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
66 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
69 * Event queue vars
71 int acpica_eventq_init = 0;
72 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
75 * Priorities relative to minclsyspri that each taskq
76 * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
77 * priority than OSL_GPE_HANDLER. There's an implicit
78 * assumption that no priority here results in exceeding
79 * maxclsyspri.
80 * Note: these initializations need to match the order of
81 * ACPI_EXECUTE_TYPE.
83 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
84 0, /* OSL_GLOBAL_LOCK_HANDLER */
85 2, /* OSL_NOTIFY_HANDLER */
86 0, /* OSL_GPE_HANDLER */
87 0, /* OSL_DEBUGGER_THREAD */
88 0, /* OSL_EC_POLL_HANDLER */
89 0 /* OSL_EC_BURST_HANDLER */
93 * Note, if you change this path, you need to update
94 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
96 static char *acpi_table_path = "/boot/acpi/tables/";
98 /* non-zero while scan_d2a_map() is working */
99 static int scanning_d2a_map = 0;
100 static int d2a_done = 0;
102 /* features supported by ACPICA and ACPI device configuration. */
103 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
104 static uint64_t acpica_devcfg_features = 0;
106 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
107 int acpica_use_safe_delay = 0;
109 /* CPU mapping data */
110 struct cpu_map_item {
111 processorid_t cpu_id;
112 UINT32 proc_id;
113 UINT32 apic_id;
114 ACPI_HANDLE obj;
117 kmutex_t cpu_map_lock;
118 static struct cpu_map_item **cpu_map = NULL;
119 static int cpu_map_count_max = 0;
120 static int cpu_map_count = 0;
121 static int cpu_map_built = 0;
124 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
125 * This flag is used to check for uppc-only systems by detecting whether
126 * acpica_map_cpu() has been called or not.
128 static int cpu_map_called = 0;
130 static int acpi_has_broken_bbn = -1;
132 /* buffer for AcpiOsVprintf() */
133 #define ACPI_OSL_PR_BUFLEN 1024
134 static char *acpi_osl_pr_buffer = NULL;
135 static int acpi_osl_pr_buflen;
137 #define D2A_DEBUG
142 static void
143 discard_event_queues()
145 int i;
148 * destroy event queues
150 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
151 if (osl_eventq[i])
152 ddi_taskq_destroy(osl_eventq[i]);
160 static ACPI_STATUS
161 init_event_queues()
163 char namebuf[32];
164 int i, error = 0;
167 * Initialize event queues
170 /* Always allocate only 1 thread per queue to force FIFO execution */
171 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
172 snprintf(namebuf, 32, "ACPI%d", i);
173 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
174 osl_eventq_pri_delta[i] + minclsyspri, 0);
175 if (osl_eventq[i] == NULL)
176 error++;
179 if (error != 0) {
180 discard_event_queues();
181 #ifdef DEBUG
182 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
183 #endif
184 return (AE_ERROR);
187 acpica_eventq_init = 1;
188 return (AE_OK);
192 * One-time initialization of OSL layer
194 ACPI_STATUS
195 AcpiOsInitialize(void)
198 * Allocate buffer for AcpiOsVprintf() here to avoid
199 * kmem_alloc()/kmem_free() at high PIL
201 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
202 if (acpi_osl_pr_buffer != NULL)
203 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
205 return (AE_OK);
209 * One-time shut-down of OSL layer
211 ACPI_STATUS
212 AcpiOsTerminate(void)
215 if (acpi_osl_pr_buffer != NULL)
216 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
218 discard_event_queues();
219 return (AE_OK);
223 ACPI_PHYSICAL_ADDRESS
224 AcpiOsGetRootPointer()
226 ACPI_PHYSICAL_ADDRESS Address;
229 * For EFI firmware, the root pointer is defined in EFI systab.
230 * The boot code processes the table and puts the physical
231 * address in the "acpi-root-tab" property.
233 Address = ddi_prop_get_int64(DDI_DEV_T_ANY, ddi_root_node(),
234 DDI_PROP_DONTPASS, "acpi-root-tab", 0);
236 if ((Address == (uintptr_t)NULL) &&
237 ACPI_FAILURE(AcpiFindRootPointer(&Address)))
238 Address = (uintptr_t)NULL;
240 return (Address);
243 /*ARGSUSED*/
244 ACPI_STATUS
245 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
246 ACPI_STRING *NewVal)
249 *NewVal = 0;
250 return (AE_OK);
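/*
 * acpica_strncpy() below copies at most len characters from src and always
 * NUL-terminates, so dest must provide room for len + 1 bytes; the
 * signature/oemid/oemtableid buffers in AcpiOsTableOverride() are sized
 * accordingly.
 */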
253 static void
254 acpica_strncpy(char *dest, const char *src, int len)
257 /*LINTED*/
258 while ((*dest++ = *src++) && (--len > 0))
259 /* copy the string */;
260 *dest = '\0';
263 ACPI_STATUS
264 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
265 ACPI_TABLE_HEADER **NewTable)
267 char signature[5];
268 char oemid[7];
269 char oemtableid[9];
270 struct _buf *file;
271 char *buf1, *buf2;
272 int count;
273 char acpi_table_loc[128];
275 acpica_strncpy(signature, ExistingTable->Signature, 4);
276 acpica_strncpy(oemid, ExistingTable->OemId, 6);
277 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
279 /* File name format is "signature_oemid_oemtableid.dat" */
280 (void) strcpy(acpi_table_loc, acpi_table_path);
281 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
282 (void) strcat(acpi_table_loc, "_");
283 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
284 (void) strcat(acpi_table_loc, "_");
285 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
286 (void) strcat(acpi_table_loc, ".dat");
288 file = kobj_open_file(acpi_table_loc);
289 if (file == (struct _buf *)-1) {
290 *NewTable = 0;
291 return (AE_OK);
292 } else {
293 buf1 = kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
294 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
295 if (count >= MAX_DAT_FILE_SIZE) {
296 cmn_err(CE_WARN, "!acpica: table %s file size too big",
297 acpi_table_loc);
298 *NewTable = 0;
299 } else {
300 buf2 = kmem_alloc(count, KM_SLEEP);
301 (void) memcpy(buf2, buf1, count);
302 *NewTable = (ACPI_TABLE_HEADER *)buf2;
303 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
304 acpi_table_loc);
307 kobj_close_file(file);
308 kmem_free(buf1, MAX_DAT_FILE_SIZE);
310 return (AE_OK);
313 ACPI_STATUS
314 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
315 ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
317 return (AE_SUPPORT);
321 * ACPI semaphore implementation
323 typedef struct {
324 kmutex_t mutex;
325 kcondvar_t cv;
326 uint32_t available;
327 uint32_t initial;
328 uint32_t maximum;
329 } acpi_sema_t;
334 void
335 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
337 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
338 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
339 /* no need to enter mutex here at creation */
340 sp->available = count;
341 sp->initial = count;
342 sp->maximum = max;
348 void
349 acpi_sema_destroy(acpi_sema_t *sp)
352 cv_destroy(&sp->cv);
353 mutex_destroy(&sp->mutex);
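/*
 * acpi_sema_p() takes count units from the semaphore, sleeping on the
 * condition variable when not enough are available.  wait_time is in
 * milliseconds: 0 means "don't block" (fail immediately with AE_TIME)
 * and ACPI_WAIT_FOREVER means wait indefinitely for the units.
 */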
359 ACPI_STATUS
360 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
362 ACPI_STATUS rv = AE_OK;
363 clock_t deadline;
365 mutex_enter(&sp->mutex);
367 if (sp->available >= count) {
369 * Enough units available, no blocking
371 sp->available -= count;
372 mutex_exit(&sp->mutex);
373 return (rv);
374 } else if (wait_time == 0) {
376 * Not enough units available and timeout
377 * specifies no blocking
379 rv = AE_TIME;
380 mutex_exit(&sp->mutex);
381 return (rv);
385 * Not enough units available and timeout specifies waiting
387 if (wait_time != ACPI_WAIT_FOREVER)
388 deadline = ddi_get_lbolt() +
389 (clock_t)drv_usectohz(wait_time * 1000);
391 do {
392 if (wait_time == ACPI_WAIT_FOREVER)
393 cv_wait(&sp->cv, &sp->mutex);
394 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
395 rv = AE_TIME;
396 break;
398 } while (sp->available < count);
400 /* if we dropped out of the wait with AE_OK, we got the units */
401 if (rv == AE_OK)
402 sp->available -= count;
404 mutex_exit(&sp->mutex);
405 return (rv);
411 void
412 acpi_sema_v(acpi_sema_t *sp, unsigned count)
414 mutex_enter(&sp->mutex);
415 sp->available += count;
416 cv_broadcast(&sp->cv);
417 mutex_exit(&sp->mutex);
421 ACPI_STATUS
422 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
423 ACPI_HANDLE *OutHandle)
425 acpi_sema_t *sp;
427 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
428 return (AE_BAD_PARAMETER);
430 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
431 acpi_sema_init(sp, MaxUnits, InitialUnits);
432 *OutHandle = (ACPI_HANDLE)sp;
433 return (AE_OK);
437 ACPI_STATUS
438 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
441 if (Handle == NULL)
442 return (AE_BAD_PARAMETER);
444 acpi_sema_destroy((acpi_sema_t *)Handle);
445 kmem_free((void *)Handle, sizeof (acpi_sema_t));
446 return (AE_OK);
449 ACPI_STATUS
450 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
453 if ((Handle == NULL) || (Units < 1))
454 return (AE_BAD_PARAMETER);
456 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
459 ACPI_STATUS
460 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
463 if ((Handle == NULL) || (Units < 1))
464 return (AE_BAD_PARAMETER);
466 acpi_sema_v((acpi_sema_t *)Handle, Units);
467 return (AE_OK);
470 ACPI_STATUS
471 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
473 kmutex_t *mp;
475 if (OutHandle == NULL)
476 return (AE_BAD_PARAMETER);
478 mp = kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
479 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
480 *OutHandle = (ACPI_HANDLE)mp;
481 return (AE_OK);
484 void
485 AcpiOsDeleteLock(ACPI_HANDLE Handle)
488 if (Handle == NULL)
489 return;
491 mutex_destroy((kmutex_t *)Handle);
492 kmem_free((void *)Handle, sizeof (kmutex_t));
495 ACPI_CPU_FLAGS
496 AcpiOsAcquireLock(ACPI_HANDLE Handle)
500 if (Handle == NULL)
501 return (AE_BAD_PARAMETER);
503 if (curthread == CPU->cpu_idle_thread) {
504 while (!mutex_tryenter((kmutex_t *)Handle))
505 /* spin */;
506 } else
507 mutex_enter((kmutex_t *)Handle);
508 return (AE_OK);
511 void
512 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
514 _NOTE(ARGUNUSED(Flags))
516 mutex_exit((kmutex_t *)Handle);
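/*
 * Memory allocation: kmem_free() needs the allocation size, which ACPI CA
 * does not pass back to AcpiOsFree(), so AcpiOsAllocate() prepends the
 * total size to every allocation and returns a pointer just past that
 * header; AcpiOsFree() steps back over it to recover the size.
 *
 *	[ ACPI_SIZE total size | Size bytes handed to the caller ]
 *	^ start of kmem block    ^ pointer returned by AcpiOsAllocate()
 */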
520 void *
521 AcpiOsAllocate(ACPI_SIZE Size)
523 ACPI_SIZE *tmp_ptr;
525 Size += sizeof (Size);
526 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
527 *tmp_ptr++ = Size;
528 return (tmp_ptr);
531 void
532 AcpiOsFree(void *Memory)
534 ACPI_SIZE size, *tmp_ptr;
536 tmp_ptr = (ACPI_SIZE *)Memory;
537 tmp_ptr -= 1;
538 size = *tmp_ptr;
539 kmem_free(tmp_ptr, size);
542 static int napics_found; /* number of ioapic addresses in array */
543 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
544 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
545 static void *dummy_ioapicadr;
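/*
 * acpica_find_ioapics() records the physical page of every I/O APIC listed
 * in the MADT.  AcpiOsMapMemory() checks requested addresses against this
 * list and returns dummy_ioapicadr (a zeroed page) instead of a real
 * mapping, presumably so that AML cannot touch I/O APIC registers that the
 * PSM itself programs.
 */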
547 void
548 acpica_find_ioapics(void)
550 int madt_seen, madt_size;
551 ACPI_SUBTABLE_HEADER *ap;
552 ACPI_MADT_IO_APIC *mia;
554 if (acpi_mapic_dtp != NULL)
555 return; /* already parsed table */
556 if (AcpiGetTable(ACPI_SIG_MADT, 1,
557 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
558 return;
560 napics_found = 0;
563 * Search the MADT for ioapics
565 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
566 madt_size = acpi_mapic_dtp->Header.Length;
567 madt_seen = sizeof (*acpi_mapic_dtp);
569 while (madt_seen < madt_size) {
571 switch (ap->Type) {
572 case ACPI_MADT_TYPE_IO_APIC:
573 mia = (ACPI_MADT_IO_APIC *) ap;
574 if (napics_found < MAX_IO_APIC) {
575 ioapic_paddr[napics_found++] =
576 (ACPI_PHYSICAL_ADDRESS)
577 (mia->Address & PAGEMASK);
579 break;
581 default:
582 break;
585 /* advance to next entry */
586 madt_seen += ap->Length;
587 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
589 if (dummy_ioapicadr == NULL)
590 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
594 void *
595 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
597 int i;
600 * If the ioapic address table is populated, check whether this is an
601 * attempt to access an ioapic; if so, return a pointer to the dummy ioapic.
603 for (i = 0; i < napics_found; i++) {
604 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
605 return (dummy_ioapicadr);
607 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
608 return (psm_map_new((paddr_t)PhysicalAddress,
609 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
612 void
613 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
616 * Check if trying to unmap dummy ioapic address.
618 if (LogicalAddress == dummy_ioapicadr)
619 return;
621 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
624 /*ARGSUSED*/
625 ACPI_STATUS
626 AcpiOsGetPhysicalAddress(void *LogicalAddress,
627 ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
630 /* UNIMPLEMENTED: not invoked by ACPI CA code */
631 return (AE_NOT_IMPLEMENTED);
635 ACPI_OSD_HANDLER acpi_isr;
636 void *acpi_isr_context;
638 uint_t
639 acpi_wrapper_isr(char *arg)
641 _NOTE(ARGUNUSED(arg))
643 int status;
645 status = (*acpi_isr)(acpi_isr_context);
647 if (status == ACPI_INTERRUPT_HANDLED) {
648 return (DDI_INTR_CLAIMED);
649 } else {
650 return (DDI_INTR_UNCLAIMED);
654 static int acpi_intr_hooked = 0;
656 ACPI_STATUS
657 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
658 ACPI_OSD_HANDLER ServiceRoutine,
659 void *Context)
661 _NOTE(ARGUNUSED(InterruptNumber))
663 int retval;
664 int sci_vect;
665 iflag_t sci_flags;
667 acpi_isr = ServiceRoutine;
668 acpi_isr_context = Context;
671 * Get SCI (adjusted for PIC/APIC mode if necessary)
673 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
674 return (AE_ERROR);
677 #ifdef DEBUG
678 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
679 #endif
681 retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
682 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
683 if (retval) {
684 acpi_intr_hooked = 1;
685 return (AE_OK);
686 } else
687 return (AE_BAD_PARAMETER);
690 ACPI_STATUS
691 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
692 ACPI_OSD_HANDLER ServiceRoutine)
694 _NOTE(ARGUNUSED(ServiceRoutine))
696 #ifdef DEBUG
697 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
698 #endif
699 if (acpi_intr_hooked) {
700 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
701 InterruptNumber);
702 acpi_intr_hooked = 0;
704 return (AE_OK);
708 ACPI_THREAD_ID
709 AcpiOsGetThreadId(void)
712 * ACPI CA doesn't care what actual value is returned as long
713 * as it is non-zero and unique to each existing thread.
714 * ACPI CA assumes that thread ID is castable to a pointer,
715 * so we use the current thread pointer.
717 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
723 ACPI_STATUS
724 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
725 void *Context)
728 if (!acpica_eventq_init) {
730 * Create taskqs for event handling
732 if (init_event_queues() != AE_OK)
733 return (AE_ERROR);
736 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
737 DDI_NOSLEEP) == DDI_FAILURE) {
738 #ifdef DEBUG
739 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
740 #endif
741 return (AE_ERROR);
743 return (AE_OK);
748 void
749 AcpiOsWaitEventsComplete(void)
751 int i;
754 * Wait for event queues to be empty.
756 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
757 if (osl_eventq[i] != NULL) {
758 ddi_taskq_wait(osl_eventq[i]);
763 void
764 AcpiOsSleep(ACPI_INTEGER Milliseconds)
767 * During kernel startup, before the first tick interrupt
768 * has taken place, we can't call delay; very late in
769 * kernel shutdown or suspend/resume, clock interrupts
770 * are blocked, so delay doesn't work then either.
771 * So we busy wait if lbolt == 0 (kernel startup)
772 * or if acpica_use_safe_delay has been set to a
773 * non-zero value.
775 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
776 drv_usecwait(Milliseconds * 1000);
777 else
778 ddi_msleep(Milliseconds);
781 void
782 AcpiOsStall(UINT32 Microseconds)
784 drv_usecwait(Microseconds);
789 * Implementation of "Windows 2001" compatible I/O permission map
792 #define OSL_IO_NONE (0)
793 #define OSL_IO_READ (1<<0)
794 #define OSL_IO_WRITE (1<<1)
795 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
796 #define OSL_IO_TERM (1<<2)
797 #define OSL_IO_DEFAULT OSL_IO_RW
799 static struct io_perm {
800 ACPI_IO_ADDRESS low;
801 ACPI_IO_ADDRESS high;
802 uint8_t perm;
803 } osl_io_perm[] = {
804 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
811 static struct io_perm *
812 osl_io_find_perm(ACPI_IO_ADDRESS addr)
814 struct io_perm *p;
816 p = osl_io_perm;
817 while (p != NULL) {
818 if ((p->low <= addr) && (addr <= p->high))
819 break;
820 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
823 return (p);
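/*
 * For example, a port in the range [0xcf8, 0xd00] (covering the PCI
 * configuration address/data ports) matches the single entry above, which
 * grants OSL_IO_RW, so AcpiOsReadPort()/AcpiOsWritePort() allow the access;
 * any other port walks off the OSL_IO_TERM entry, osl_io_find_perm()
 * returns NULL, and the access proceeds unrestricted by default.
 */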
829 ACPI_STATUS
830 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
832 struct io_perm *p;
834 /* verify permission */
835 p = osl_io_find_perm(Address);
836 if (p && (p->perm & OSL_IO_READ) == 0) {
837 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
838 (long)Address, Width);
839 *Value = 0xffffffff;
840 return (AE_ERROR);
843 switch (Width) {
844 case 8:
845 *Value = inb(Address);
846 break;
847 case 16:
848 *Value = inw(Address);
849 break;
850 case 32:
851 *Value = inl(Address);
852 break;
853 default:
854 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
855 (long)Address, Width);
856 return (AE_BAD_PARAMETER);
858 return (AE_OK);
861 ACPI_STATUS
862 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
864 struct io_perm *p;
866 /* verify permission */
867 p = osl_io_find_perm(Address);
868 if (p && (p->perm & OSL_IO_WRITE) == 0) {
869 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
870 (long)Address, Width);
871 return (AE_ERROR);
874 switch (Width) {
875 case 8:
876 outb(Address, Value);
877 break;
878 case 16:
879 outw(Address, Value);
880 break;
881 case 32:
882 outl(Address, Value);
883 break;
884 default:
885 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
886 (long)Address, Width);
887 return (AE_BAD_PARAMETER);
889 return (AE_OK);
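/*
 * Memory-mapped register access: OSL_RW() performs a single load or store
 * of the given type through a typed pointer (rw != 0 selects a write), and
 * osl_rw_memory() wraps it in a transient psm_map_new()/psm_unmap() mapping
 * of just the Width/8 bytes being accessed.
 */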
897 #define OSL_RW(ptr, val, type, rw) \
898 { if (rw) *((type *)(ptr)) = *((type *) val); \
899 else *((type *) val) = *((type *)(ptr)); }
902 static void
903 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value,
904 UINT32 Width, int write)
906 size_t maplen = Width / 8;
907 caddr_t ptr;
909 ptr = psm_map_new((paddr_t)Address, maplen,
910 PSM_PROT_WRITE | PSM_PROT_READ);
912 switch (maplen) {
913 case 1:
914 OSL_RW(ptr, Value, uint8_t, write);
915 break;
916 case 2:
917 OSL_RW(ptr, Value, uint16_t, write);
918 break;
919 case 4:
920 OSL_RW(ptr, Value, uint32_t, write);
921 break;
922 case 8:
923 OSL_RW(ptr, Value, uint64_t, write);
924 break;
925 default:
926 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
927 Width);
928 break;
931 psm_unmap(ptr, maplen);
934 ACPI_STATUS
935 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value, UINT32 Width)
937 osl_rw_memory(Address, Value, Width, 0);
938 return (AE_OK);
941 ACPI_STATUS
942 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address, UINT64 Value, UINT32 Width)
944 osl_rw_memory(Address, &Value, Width, 1);
945 return (AE_OK);
949 ACPI_STATUS
950 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
951 UINT64 *Value, UINT32 Width)
954 switch (Width) {
955 case 8:
956 *Value = (UINT64)(*pci_getb_func)
957 (PciId->Bus, PciId->Device, PciId->Function, Reg);
958 break;
959 case 16:
960 *Value = (UINT64)(*pci_getw_func)
961 (PciId->Bus, PciId->Device, PciId->Function, Reg);
962 break;
963 case 32:
964 *Value = (UINT64)(*pci_getl_func)
965 (PciId->Bus, PciId->Device, PciId->Function, Reg);
966 break;
967 case 64:
968 default:
969 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
970 Reg, Width);
971 return (AE_BAD_PARAMETER);
973 return (AE_OK);
979 int acpica_write_pci_config_ok = 1;
981 ACPI_STATUS
982 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
983 UINT64 Value, UINT32 Width)
986 if (!acpica_write_pci_config_ok) {
987 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
988 " %lx %d not permitted", PciId->Bus, PciId->Device,
989 PciId->Function, Reg, (long)Value, Width);
990 return (AE_OK);
993 switch (Width) {
994 case 8:
995 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
996 Reg, (uint8_t)Value);
997 break;
998 case 16:
999 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
1000 Reg, (uint16_t)Value);
1001 break;
1002 case 32:
1003 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
1004 Reg, (uint32_t)Value);
1005 break;
1006 case 64:
1007 default:
1008 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
1009 Reg, Width);
1010 return (AE_BAD_PARAMETER);
1012 return (AE_OK);
1016 * Called with ACPI_HANDLEs for both a PCI Config Space
1017 * OpRegion and (what ACPI CA thinks is) the PCI device
1018 * to which this ConfigSpace OpRegion belongs.
1020 * ACPI CA uses _BBN and _ADR objects to determine the default
1021 * values for bus, segment, device and function; anything ACPI CA
1022 * can't figure out from the ACPI tables will be 0. One very
1023 * old 32-bit x86 system is known to have broken _BBN; this is
1024 * not addressed here.
1026 * Some BIOSes implement _BBN() by reading PCI config space
1027 * on bus #0 - which means that we'll recurse when we attempt
1028 * to create the devinfo-to-ACPI map. If Derive is called during
1029 * scan_d2a_map, we don't translate the bus # and return.
1031 * We get the parent of the OpRegion, which must be a PCI
1032 * node, fetch the associated devinfo node and snag the
1033 * b/d/f from it.
1035 void
1036 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1037 ACPI_PCI_ID **PciId)
1039 ACPI_HANDLE handle;
1040 dev_info_t *dip;
1041 int bus, device, func, devfn;
1044 * See above - avoid recursing during scanning_d2a_map.
1046 if (scanning_d2a_map)
1047 return;
1050 * Get the OpRegion's parent
1052 if (AcpiGetParent(chandle, &handle) != AE_OK)
1053 return;
1056 * If we've mapped the ACPI node to the devinfo
1057 * tree, use the devinfo reg property
1059 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1060 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1061 (*PciId)->Bus = bus;
1062 (*PciId)->Device = device;
1063 (*PciId)->Function = func;
1068 /*ARGSUSED*/
1069 BOOLEAN
1070 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1073 /* Always says yes; all mapped memory assumed readable */
1074 return (1);
1077 /*ARGSUSED*/
1078 BOOLEAN
1079 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1082 /* Always says yes; all mapped memory assumed writable */
1083 return (1);
1086 UINT64
1087 AcpiOsGetTimer(void)
1089 /* gethrtime() returns 1nS resolution; convert to 100nS granules */
1090 return ((gethrtime() + 50) / 100);
1093 static struct AcpiOSIFeature_s {
1094 uint64_t control_flag;
1095 const char *feature_name;
1096 } AcpiOSIFeatures[] = {
1097 { ACPI_FEATURE_OSI_MODULE, "Module Device" },
1098 { 0, "Processor Device" }
1101 /*ARGSUSED*/
1102 ACPI_STATUS
1103 AcpiOsValidateInterface(char *feature)
1105 int i;
1107 ASSERT(feature != NULL);
1108 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1109 i++) {
1110 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1111 continue;
1113 /* Check whether required core features are available. */
1114 if (AcpiOSIFeatures[i].control_flag != 0 &&
1115 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1116 AcpiOSIFeatures[i].control_flag) {
1117 break;
1119 /* Feature supported. */
1120 return (AE_OK);
1123 return (AE_SUPPORT);
1126 /*ARGSUSED*/
1127 ACPI_STATUS
1128 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1129 ACPI_SIZE length)
1131 return (AE_OK);
1134 ACPI_STATUS
1135 AcpiOsSignal(UINT32 Function, void *Info)
1137 _NOTE(ARGUNUSED(Function, Info))
1139 /* FUTUREWORK: debugger support */
1141 cmn_err(CE_NOTE, "!OsSignal unimplemented");
1142 return (AE_OK);
1145 void ACPI_INTERNAL_VAR_XFACE
1146 AcpiOsPrintf(const char *Format, ...)
1148 va_list ap;
1150 va_start(ap, Format);
1151 AcpiOsVprintf(Format, ap);
1152 va_end(ap);
1156 * When != 0, sends output to console
1157 * Patchable with kmdb or /etc/system.
1159 int acpica_console_out = 0;
1161 #define ACPICA_OUTBUF_LEN 160
1162 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1163 int acpica_outbuf_offset;
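/*
 * acpica_pr_buf() accumulates ACPI CA output in acpica_outbuf until a
 * newline arrives or the buffer would overflow, then flushes the completed
 * line to printf(), prom_printf(), or strlog() as selected by
 * acpica_console_out.
 */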
1168 static void
1169 acpica_pr_buf(char *buf)
1171 char c, *bufp, *outp;
1172 int out_remaining;
1175 * copy the supplied buffer into the output buffer
1176 * when we hit a '\n' or overflow the output buffer,
1177 * output and reset the output buffer
1179 bufp = buf;
1180 outp = acpica_outbuf + acpica_outbuf_offset;
1181 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1182 while (c = *bufp++) {
1183 *outp++ = c;
1184 if (c == '\n' || --out_remaining == 0) {
1185 *outp = '\0';
1186 switch (acpica_console_out) {
1187 case 1:
1188 printf(acpica_outbuf);
1189 break;
1190 case 2:
1191 prom_printf(acpica_outbuf);
1192 break;
1193 case 0:
1194 default:
1195 (void) strlog(0, 0, 0,
1196 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1197 acpica_outbuf);
1198 break;
1200 acpica_outbuf_offset = 0;
1201 outp = acpica_outbuf;
1202 out_remaining = ACPICA_OUTBUF_LEN - 1;
1206 acpica_outbuf_offset = outp - acpica_outbuf;
1209 void
1210 AcpiOsVprintf(const char *Format, va_list Args)
1214 * If AcpiOsInitialize() failed to allocate a string buffer,
1215 * resort to vprintf().
1217 if (acpi_osl_pr_buffer == NULL) {
1218 vprintf(Format, Args);
1219 return;
1223 * It is possible that a very long debug output statement will
1224 * be truncated; this is silently ignored.
1226 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1227 acpica_pr_buf(acpi_osl_pr_buffer);
1230 void
1231 AcpiOsRedirectOutput(void *Destination)
1233 _NOTE(ARGUNUSED(Destination))
1235 /* FUTUREWORK: debugger support */
1237 #ifdef DEBUG
1238 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1239 #endif
1243 UINT32
1244 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1246 _NOTE(ARGUNUSED(Buffer))
1247 _NOTE(ARGUNUSED(len))
1248 _NOTE(ARGUNUSED(BytesRead))
1250 /* FUTUREWORK: debugger support */
1252 return (0);
1256 * Device tree binding
1258 static ACPI_STATUS
1259 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1261 _NOTE(ARGUNUSED(lvl));
1263 int sta, hid, bbn;
1264 int busno = (intptr_t)ctxp;
1265 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1267 /* Check whether device exists. */
1268 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1269 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1271 * Skip object if device doesn't exist.
1272 * According to ACPI Spec,
1273 * 1) setting either bit 0 or bit 3 means that device exists.
1274 * 2) Absence of _STA method means all status bits set.
1276 return (AE_CTRL_DEPTH);
1279 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1280 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1281 /* Non PCI/PCIe host bridge. */
1282 return (AE_OK);
1285 if (acpi_has_broken_bbn) {
1286 ACPI_BUFFER rb;
1287 rb.Pointer = NULL;
1288 rb.Length = ACPI_ALLOCATE_BUFFER;
1290 /* Decree _BBN == n from PCI<n> */
1291 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1292 return (AE_CTRL_TERMINATE);
1294 bbn = ((char *)rb.Pointer)[3] - '0';
1295 AcpiOsFree(rb.Pointer);
1296 if (bbn == busno || busno == 0) {
1297 *hdlp = hdl;
1298 return (AE_CTRL_TERMINATE);
1300 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1301 if (bbn == busno) {
1302 *hdlp = hdl;
1303 return (AE_CTRL_TERMINATE);
1305 } else if (busno == 0) {
1306 *hdlp = hdl;
1307 return (AE_CTRL_TERMINATE);
1310 return (AE_CTRL_DEPTH);
1313 static int
1314 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1316 ACPI_HANDLE sbobj, busobj;
1318 /* initialize static flag by querying ACPI namespace for bug */
1319 if (acpi_has_broken_bbn == -1)
1320 acpi_has_broken_bbn = acpica_query_bbn_problem();
1322 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1323 busobj = NULL;
1324 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1325 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1326 (void **)&busobj);
1327 if (busobj != NULL) {
1328 *rh = busobj;
1329 return (AE_OK);
1333 return (AE_ERROR);
1336 static ACPI_STATUS
1337 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1339 _NOTE(ARGUNUSED(lvl));
1340 _NOTE(ARGUNUSED(rvpp));
1342 int sta, hid, bbn;
1343 int *cntp = (int *)ctxp;
1345 /* Check whether device exists. */
1346 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1347 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1349 * Skip object if device doesn't exist.
1350 * According to ACPI Spec,
1351 * 1) setting either bit 0 or bit 3 means that device exists.
1352 * 2) Absence of _STA method means all status bits set.
1354 return (AE_CTRL_DEPTH);
1357 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1358 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1359 /* Non PCI/PCIe host bridge. */
1360 return (AE_OK);
1361 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1362 bbn == 0 && ++(*cntp) > 1) {
1364 * If we find more than one bus with a 0 _BBN, we have
1365 * the problem that BigBear's BIOS exhibits.
1367 return (AE_CTRL_TERMINATE);
1368 } else {
1370 * Skip children of PCI/PCIe host bridge.
1372 return (AE_CTRL_DEPTH);
1377 * Look for ACPI problem where _BBN is zero for multiple PCI buses
1378 * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1379 * below if it exists.
1381 static int
1382 acpica_query_bbn_problem(void)
1384 ACPI_HANDLE sbobj;
1385 int zerobbncnt;
1386 void *rv;
1388 zerobbncnt = 0;
1389 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1390 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1391 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1394 return (zerobbncnt > 1 ? 1 : 0);
1397 static const char hextab[] = "0123456789ABCDEF";
1399 static int
1400 hexdig(int c)
1403 * Get hex digit:
1405 * Returns the 4-bit hex digit named by the input character. Returns
1406 * zero if the input character is not valid hex!
1409 int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1410 int j = sizeof (hextab);
1412 while (--j && (x != hextab[j])) {
1414 return (j);
1417 static int
1418 CompressEisaID(char *np)
1421 * Compress an EISA device name:
1423 * This routine converts a 7-byte ASCII device name into the 4-byte
1424 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
1425 * NV-RAM!)
1428 union { char octets[4]; int retval; } myu;
1430 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1431 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1432 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1433 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1435 return (myu.retval);
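/*
 * For example, the 7-character string "PNP0A03" (the PCI host bridge HID)
 * compresses to the bytes 0x41 0xd0 0x0a 0x03, which read as a
 * little-endian int on x86 is the familiar EISA ID 0x030ad041.
 */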
1438 ACPI_STATUS
1439 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1441 ACPI_STATUS status;
1442 ACPI_BUFFER rb;
1443 ACPI_OBJECT ro;
1445 rb.Pointer = &ro;
1446 rb.Length = sizeof (ro);
1447 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1448 ACPI_TYPE_INTEGER)) == AE_OK)
1449 *rint = ro.Integer.Value;
1451 return (status);
1454 static int
1455 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1457 ACPI_BUFFER rb;
1458 ACPI_OBJECT *rv;
1460 rb.Pointer = NULL;
1461 rb.Length = ACPI_ALLOCATE_BUFFER;
1462 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1463 rb.Length != 0) {
1464 rv = rb.Pointer;
1465 if (rv->Type == ACPI_TYPE_INTEGER) {
1466 *rint = rv->Integer.Value;
1467 AcpiOsFree(rv);
1468 return (AE_OK);
1469 } else if (rv->Type == ACPI_TYPE_STRING) {
1470 char *stringData;
1472 /* Convert the string into an EISA ID */
1473 if (rv->String.Pointer == NULL) {
1474 AcpiOsFree(rv);
1475 return (AE_ERROR);
1478 stringData = rv->String.Pointer;
1481 * If the string is an EisaID, it must be 7
1482 * characters; if it's an ACPI ID, it will be 8
1483 * (and we don't care about ACPI ids here).
1485 if (strlen(stringData) != 7) {
1486 AcpiOsFree(rv);
1487 return (AE_ERROR);
1490 *rint = CompressEisaID(stringData);
1491 AcpiOsFree(rv);
1492 return (AE_OK);
1493 } else
1494 AcpiOsFree(rv);
1496 return (AE_ERROR);
1500 * Create linkage between devinfo nodes and ACPI nodes
1502 ACPI_STATUS
1503 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1505 ACPI_STATUS status;
1506 ACPI_BUFFER rb;
1509 * Tag the devinfo node with the ACPI name
1511 rb.Pointer = NULL;
1512 rb.Length = ACPI_ALLOCATE_BUFFER;
1513 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1514 if (ACPI_FAILURE(status)) {
1515 cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1516 } else {
1517 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1518 "acpi-namespace", (char *)rb.Pointer);
1519 AcpiOsFree(rb.Pointer);
1522 * Tag the ACPI node with the dip
1524 status = acpica_set_devinfo(acpiobj, dip);
1525 ASSERT(ACPI_SUCCESS(status));
1528 return (status);
1532 * Destroy linkage between devinfo nodes and ACPI nodes
1534 ACPI_STATUS
1535 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1537 (void) acpica_unset_devinfo(acpiobj);
1538 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1540 return (AE_OK);
1544 * Return the ACPI device node matching the CPU dev_info node.
1546 ACPI_STATUS
1547 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1549 int i;
1552 * if cpu_map itself is NULL, we're a uppc system and
1553 * acpica_build_processor_map() hasn't been called yet.
1554 * So call it here
1556 if (cpu_map == NULL) {
1557 (void) acpica_build_processor_map();
1558 if (cpu_map == NULL)
1559 return (AE_ERROR);
1562 if (cpu_id < 0) {
1563 return (AE_ERROR);
1567 * search object with cpuid in cpu_map
1569 mutex_enter(&cpu_map_lock);
1570 for (i = 0; i < cpu_map_count; i++) {
1571 if (cpu_map[i]->cpu_id == cpu_id) {
1572 break;
1575 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1576 *rh = cpu_map[i]->obj;
1577 mutex_exit(&cpu_map_lock);
1578 return (AE_OK);
1581 /* Handle special case for uppc-only systems. */
1582 if (cpu_map_called == 0) {
1583 uint32_t apicid = cpuid_get_apicid(CPU);
1584 if (apicid != UINT32_MAX) {
1585 for (i = 0; i < cpu_map_count; i++) {
1586 if (cpu_map[i]->apic_id == apicid) {
1587 break;
1590 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1591 *rh = cpu_map[i]->obj;
1592 mutex_exit(&cpu_map_lock);
1593 return (AE_OK);
1597 mutex_exit(&cpu_map_lock);
1599 return (AE_ERROR);
1603 * Determine if this object is a processor
1605 static ACPI_STATUS
1606 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1608 ACPI_STATUS status;
1609 ACPI_OBJECT_TYPE objtype;
1610 unsigned long acpi_id;
1611 ACPI_BUFFER rb;
1612 ACPI_DEVICE_INFO *di;
1614 if (AcpiGetType(obj, &objtype) != AE_OK)
1615 return (AE_OK);
1617 if (objtype == ACPI_TYPE_PROCESSOR) {
1618 /* process a Processor */
1619 rb.Pointer = NULL;
1620 rb.Length = ACPI_ALLOCATE_BUFFER;
1621 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1622 ACPI_TYPE_PROCESSOR);
1623 if (status != AE_OK) {
1624 cmn_err(CE_WARN, "!acpica: error probing Processor");
1625 return (status);
1627 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1628 AcpiOsFree(rb.Pointer);
1629 } else if (objtype == ACPI_TYPE_DEVICE) {
1630 /* process a processor Device */
1631 status = AcpiGetObjectInfo(obj, &di);
1632 if (status != AE_OK) {
1633 cmn_err(CE_WARN,
1634 "!acpica: error probing Processor Device\n");
1635 return (status);
1638 if (!(di->Valid & ACPI_VALID_UID) ||
1639 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1640 ACPI_FREE(di);
1641 cmn_err(CE_WARN,
1642 "!acpica: error probing Processor Device _UID\n");
1643 return (AE_ERROR);
1645 ACPI_FREE(di);
1647 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1649 return (AE_OK);
1652 void
1653 scan_d2a_map(void)
1655 dev_info_t *dip, *cdip;
1656 ACPI_HANDLE acpiobj;
1657 char *device_type_prop;
1658 int bus;
1659 static int map_error = 0;
1661 if (map_error || (d2a_done != 0))
1662 return;
1664 scanning_d2a_map = 1;
1667 * Find all child-of-root PCI buses, and find their corresponding
1668 * ACPI child-of-root PCI nodes. For each one, add to the
1669 * d2a table.
1672 for (dip = ddi_get_child(ddi_root_node());
1673 dip != NULL;
1674 dip = ddi_get_next_sibling(dip)) {
1676 /* prune non-PCI nodes */
1677 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1678 DDI_PROP_DONTPASS,
1679 "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1680 continue;
1682 if ((strcmp("pci", device_type_prop) != 0) &&
1683 (strcmp("pciex", device_type_prop) != 0)) {
1684 ddi_prop_free(device_type_prop);
1685 continue;
1688 ddi_prop_free(device_type_prop);
1691 * To get bus number of dip, get first child and get its
1692 * bus number. If NULL, just continue, because we don't
1693 * care about bus nodes with no children anyway.
1695 if ((cdip = ddi_get_child(dip)) == NULL)
1696 continue;
1698 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1699 #ifdef D2ADEBUG
1700 cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1701 #endif
1702 map_error = 1;
1703 scanning_d2a_map = 0;
1704 d2a_done = 1;
1705 return;
1708 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1709 #ifdef D2ADEBUG
1710 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1711 #endif
1712 map_error = 1;
1713 continue;
1716 acpica_tag_devinfo(dip, acpiobj);
1718 /* call recursively to enumerate subtrees */
1719 scan_d2a_subtree(dip, acpiobj, bus);
1722 scanning_d2a_map = 0;
1723 d2a_done = 1;
1727 * For all acpi child devices of acpiobj, find their matching
1728 * dip under "dip" argument. (matching means "matches dev/fn").
1729 * bus is assumed to already be a match from caller, and is
1730 * used here only to record in the d2a entry. Recurse if necessary.
1732 static void
1733 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1735 int acpi_devfn, hid;
1736 ACPI_HANDLE acld;
1737 dev_info_t *dcld;
1738 int dcld_b, dcld_d, dcld_f;
1739 int dev, func;
1740 char *device_type_prop;
1742 acld = NULL;
1743 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1744 == AE_OK) {
1745 /* get the dev/func we're looking for in the devinfo tree */
1746 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1747 continue;
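/*
 * _ADR for a PCI device encodes the device number in the upper 16 bits
 * and the function number in the lower 16 bits; a function value of
 * 0xFFFF means "all functions" and is treated as a wildcard below.
 */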
1748 dev = (acpi_devfn >> 16) & 0xFFFF;
1749 func = acpi_devfn & 0xFFFF;
1751 /* look through all the immediate children of dip */
1752 for (dcld = ddi_get_child(dip); dcld != NULL;
1753 dcld = ddi_get_next_sibling(dcld)) {
1754 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1755 continue;
1757 /* dev must match; function must match or wildcard */
1758 if (dcld_d != dev ||
1759 (func != 0xFFFF && func != dcld_f))
1760 continue;
1761 bus = dcld_b;
1763 /* found a match, record it */
1764 acpica_tag_devinfo(dcld, acld);
1766 /* if we find a bridge, recurse from here */
1767 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1768 DDI_PROP_DONTPASS, "device_type",
1769 &device_type_prop) == DDI_PROP_SUCCESS) {
1770 if ((strcmp("pci", device_type_prop) == 0) ||
1771 (strcmp("pciex", device_type_prop) == 0))
1772 scan_d2a_subtree(dcld, acld, bus);
1773 ddi_prop_free(device_type_prop);
1776 /* done finding a match, so break now */
1777 break;
1783 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1786 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1788 pci_regspec_t *pci_rp;
1789 int len;
1791 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1792 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1793 return (-1);
1795 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1796 ddi_prop_free(pci_rp);
1797 return (-1);
1799 if (bus != NULL)
1800 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1801 if (device != NULL)
1802 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1803 if (func != NULL)
1804 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1805 ddi_prop_free(pci_rp);
1806 return (0);
1810 * Return the ACPI device node matching this dev_info node, if it
1811 * exists in the ACPI tree.
1813 ACPI_STATUS
1814 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1816 ACPI_STATUS status;
1817 char *acpiname;
1819 #ifdef DEBUG
1820 if (d2a_done == 0)
1821 cmn_err(CE_WARN, "!acpica_get_handle:"
1822 " no ACPI mapping for %s", ddi_node_name(dip));
1823 #endif
1825 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1826 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1827 return (AE_ERROR);
1830 status = AcpiGetHandle(NULL, acpiname, rh);
1831 ddi_prop_free((void *)acpiname);
1832 return (status);
1838 * Manage OS data attachment to ACPI nodes
1842 * Return the (dev_info_t *) associated with the ACPI node.
1844 ACPI_STATUS
1845 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1847 ACPI_STATUS status;
1848 void *ptr;
1850 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1851 if (status == AE_OK)
1852 *dipp = (dev_info_t *)ptr;
1854 return (status);
1858 * Set the dev_info_t associated with the ACPI node.
1860 static ACPI_STATUS
1861 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1863 ACPI_STATUS status;
1865 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1866 return (status);
1870 * Unset the dev_info_t associated with the ACPI node.
1872 static ACPI_STATUS
1873 acpica_unset_devinfo(ACPI_HANDLE obj)
1875 return (AcpiDetachData(obj, acpica_devinfo_handler));
1881 void
1882 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1884 /* no-op */
1887 ACPI_STATUS
1888 acpica_build_processor_map(void)
1890 ACPI_STATUS status;
1891 void *rv;
1894 * shouldn't be called more than once anyway
1896 if (cpu_map_built)
1897 return (AE_OK);
1900 * ACPI device configuration driver has built mapping information
1901 * among processor id and object handle, no need to probe again.
1903 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1904 cpu_map_built = 1;
1905 return (AE_OK);
1909 * Look for Processor objects
1911 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1912 ACPI_ROOT_OBJECT,
1914 acpica_probe_processor,
1915 NULL,
1916 NULL,
1917 &rv);
1918 ASSERT(status == AE_OK);
1921 * Look for processor Device objects
1923 status = AcpiGetDevices("ACPI0007",
1924 acpica_probe_processor,
1925 NULL,
1926 &rv);
1927 ASSERT(status == AE_OK);
1928 cpu_map_built = 1;
1930 return (status);
1934 * Grow cpu map table on demand.
1936 static void
1937 acpica_grow_cpu_map(void)
1939 if (cpu_map_count == cpu_map_count_max) {
1940 size_t sz;
1941 struct cpu_map_item **new_map;
1943 ASSERT(cpu_map_count_max < INT_MAX / 2);
1944 cpu_map_count_max += max_ncpus;
1945 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1946 KM_SLEEP);
1947 if (cpu_map_count != 0) {
1948 ASSERT(cpu_map != NULL);
1949 sz = sizeof (cpu_map[0]) * cpu_map_count;
1950 kcopy(cpu_map, new_map, sz);
1951 kmem_free(cpu_map, sz);
1953 cpu_map = new_map;
1958 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1959 * ACPI handle). The mapping table will be setup in two steps:
1960 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1961 * processor id and ACPI object handle.
1962 * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1963 * On systems which have ACPI device configuration for CPUs enabled,
1964 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1965 * otherwise acpica_map_cpu() will be called before
1966 * acpica_add_processor_to_map().
1968 ACPI_STATUS
1969 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1971 int i;
1972 ACPI_STATUS rc = AE_OK;
1973 struct cpu_map_item *item = NULL;
1975 ASSERT(obj != NULL);
1976 if (obj == NULL) {
1977 return (AE_ERROR);
1980 mutex_enter(&cpu_map_lock);
1983 * Special case for uppc
1984 * If we're a uppc system and ACPI device configuration for CPU has
1985 * been disabled, there won't be a CPU map yet because uppc psm doesn't
1986 * call acpica_map_cpu(). So create one and use the passed-in processor
1987 * as CPU 0
1988 * Assumption: the first CPU returned by
1989 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
1990 * Unfortunately there appears to be no good way to ASSERT this.
1992 if (cpu_map == NULL &&
1993 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1994 acpica_grow_cpu_map();
1995 ASSERT(cpu_map != NULL);
1996 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
1997 item->cpu_id = 0;
1998 item->proc_id = acpi_id;
1999 item->apic_id = apic_id;
2000 item->obj = obj;
2001 cpu_map[0] = item;
2002 cpu_map_count = 1;
2003 mutex_exit(&cpu_map_lock);
2004 return (AE_OK);
2007 for (i = 0; i < cpu_map_count; i++) {
2008 if (cpu_map[i]->obj == obj) {
2009 rc = AE_ALREADY_EXISTS;
2010 break;
2011 } else if (cpu_map[i]->proc_id == acpi_id) {
2012 ASSERT(item == NULL);
2013 item = cpu_map[i];
2017 if (rc == AE_OK) {
2018 if (item != NULL) {
2020 * ACPI alias objects may cause more than one object
2021 * to have the same ACPI processor id; only remember
2022 * the first object encountered.
2024 if (item->obj == NULL) {
2025 item->obj = obj;
2026 item->apic_id = apic_id;
2027 } else {
2028 rc = AE_ALREADY_EXISTS;
2030 } else if (cpu_map_count >= INT_MAX / 2) {
2031 rc = AE_NO_MEMORY;
2032 } else {
2033 acpica_grow_cpu_map();
2034 ASSERT(cpu_map != NULL);
2035 ASSERT(cpu_map_count < cpu_map_count_max);
2036 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2037 item->cpu_id = -1;
2038 item->proc_id = acpi_id;
2039 item->apic_id = apic_id;
2040 item->obj = obj;
2041 cpu_map[cpu_map_count] = item;
2042 cpu_map_count++;
2046 mutex_exit(&cpu_map_lock);
2048 return (rc);
2051 ACPI_STATUS
2052 acpica_remove_processor_from_map(UINT32 acpi_id)
2054 int i;
2055 ACPI_STATUS rc = AE_NOT_EXIST;
2057 mutex_enter(&cpu_map_lock);
2058 for (i = 0; i < cpu_map_count; i++) {
2059 if (cpu_map[i]->proc_id != acpi_id) {
2060 continue;
2062 cpu_map[i]->obj = NULL;
2063 /* Free item if no more reference to it. */
2064 if (cpu_map[i]->cpu_id == -1) {
2065 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2066 cpu_map[i] = NULL;
2067 cpu_map_count--;
2068 if (i != cpu_map_count) {
2069 cpu_map[i] = cpu_map[cpu_map_count];
2070 cpu_map[cpu_map_count] = NULL;
2073 rc = AE_OK;
2074 break;
2076 mutex_exit(&cpu_map_lock);
2078 return (rc);
2081 ACPI_STATUS
2082 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2084 int i;
2085 ACPI_STATUS rc = AE_OK;
2086 struct cpu_map_item *item = NULL;
2088 ASSERT(cpuid != -1);
2089 if (cpuid == -1) {
2090 return (AE_ERROR);
2093 mutex_enter(&cpu_map_lock);
2094 cpu_map_called = 1;
2095 for (i = 0; i < cpu_map_count; i++) {
2096 if (cpu_map[i]->cpu_id == cpuid) {
2097 rc = AE_ALREADY_EXISTS;
2098 break;
2099 } else if (cpu_map[i]->proc_id == acpi_id) {
2100 ASSERT(item == NULL);
2101 item = cpu_map[i];
2104 if (rc == AE_OK) {
2105 if (item != NULL) {
2106 if (item->cpu_id == -1) {
2107 item->cpu_id = cpuid;
2108 } else {
2109 rc = AE_ALREADY_EXISTS;
2111 } else if (cpu_map_count >= INT_MAX / 2) {
2112 rc = AE_NO_MEMORY;
2113 } else {
2114 acpica_grow_cpu_map();
2115 ASSERT(cpu_map != NULL);
2116 ASSERT(cpu_map_count < cpu_map_count_max);
2117 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2118 item->cpu_id = cpuid;
2119 item->proc_id = acpi_id;
2120 item->apic_id = UINT32_MAX;
2121 item->obj = NULL;
2122 cpu_map[cpu_map_count] = item;
2123 cpu_map_count++;
2126 mutex_exit(&cpu_map_lock);
2128 return (rc);
2131 ACPI_STATUS
2132 acpica_unmap_cpu(processorid_t cpuid)
2134 int i;
2135 ACPI_STATUS rc = AE_NOT_EXIST;
2137 ASSERT(cpuid != -1);
2138 if (cpuid == -1) {
2139 return (rc);
2142 mutex_enter(&cpu_map_lock);
2143 for (i = 0; i < cpu_map_count; i++) {
2144 if (cpu_map[i]->cpu_id != cpuid) {
2145 continue;
2147 cpu_map[i]->cpu_id = -1;
2148 /* Free item if no more reference. */
2149 if (cpu_map[i]->obj == NULL) {
2150 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2151 cpu_map[i] = NULL;
2152 cpu_map_count--;
2153 if (i != cpu_map_count) {
2154 cpu_map[i] = cpu_map[cpu_map_count];
2155 cpu_map[cpu_map_count] = NULL;
2158 rc = AE_OK;
2159 break;
2161 mutex_exit(&cpu_map_lock);
2163 return (rc);
2166 ACPI_STATUS
2167 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2169 int i;
2170 ACPI_STATUS rc = AE_NOT_EXIST;
2172 ASSERT(cpuid != -1);
2173 if (cpuid == -1) {
2174 return (rc);
2177 mutex_enter(&cpu_map_lock);
2178 for (i = 0; i < cpu_map_count; i++) {
2179 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2180 *hdlp = cpu_map[i]->obj;
2181 rc = AE_OK;
2182 break;
2185 mutex_exit(&cpu_map_lock);
2187 return (rc);
2190 ACPI_STATUS
2191 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2193 int i;
2194 ACPI_STATUS rc = AE_NOT_EXIST;
2196 mutex_enter(&cpu_map_lock);
2197 for (i = 0; i < cpu_map_count; i++) {
2198 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2199 *hdlp = cpu_map[i]->obj;
2200 rc = AE_OK;
2201 break;
2204 mutex_exit(&cpu_map_lock);
2206 return (rc);
2209 ACPI_STATUS
2210 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2212 int i;
2213 ACPI_STATUS rc = AE_NOT_EXIST;
2215 ASSERT(apicid != UINT32_MAX);
2216 if (apicid == UINT32_MAX) {
2217 return (rc);
2220 mutex_enter(&cpu_map_lock);
2221 for (i = 0; i < cpu_map_count; i++) {
2222 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2223 *hdlp = cpu_map[i]->obj;
2224 rc = AE_OK;
2225 break;
2228 mutex_exit(&cpu_map_lock);
2230 return (rc);
2233 ACPI_STATUS
2234 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2236 int i;
2237 ACPI_STATUS rc = AE_NOT_EXIST;
2239 ASSERT(cpuidp != NULL);
2240 if (hdl == NULL || cpuidp == NULL) {
2241 return (rc);
2244 *cpuidp = -1;
2245 mutex_enter(&cpu_map_lock);
2246 for (i = 0; i < cpu_map_count; i++) {
2247 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2248 *cpuidp = cpu_map[i]->cpu_id;
2249 rc = AE_OK;
2250 break;
2253 mutex_exit(&cpu_map_lock);
2255 return (rc);
2258 ACPI_STATUS
2259 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2261 int i;
2262 ACPI_STATUS rc = AE_NOT_EXIST;
2264 ASSERT(rp != NULL);
2265 if (hdl == NULL || rp == NULL) {
2266 return (rc);
2269 *rp = UINT32_MAX;
2270 mutex_enter(&cpu_map_lock);
2271 for (i = 0; i < cpu_map_count; i++) {
2272 if (cpu_map[i]->obj == hdl &&
2273 cpu_map[i]->apic_id != UINT32_MAX) {
2274 *rp = cpu_map[i]->apic_id;
2275 rc = AE_OK;
2276 break;
2279 mutex_exit(&cpu_map_lock);
2281 return (rc);
2284 ACPI_STATUS
2285 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2287 int i;
2288 ACPI_STATUS rc = AE_NOT_EXIST;
2290 ASSERT(rp != NULL);
2291 if (hdl == NULL || rp == NULL) {
2292 return (rc);
2295 *rp = UINT32_MAX;
2296 mutex_enter(&cpu_map_lock);
2297 for (i = 0; i < cpu_map_count; i++) {
2298 if (cpu_map[i]->obj == hdl) {
2299 *rp = cpu_map[i]->proc_id;
2300 rc = AE_OK;
2301 break;
2304 mutex_exit(&cpu_map_lock);
2306 return (rc);
2309 void
2310 acpica_set_core_feature(uint64_t features)
2312 atomic_or_64(&acpica_core_features, features);
2315 void
2316 acpica_clear_core_feature(uint64_t features)
2318 atomic_and_64(&acpica_core_features, ~features);
2321 uint64_t
2322 acpica_get_core_feature(uint64_t features)
2324 return (acpica_core_features & features);
2327 void
2328 acpica_set_devcfg_feature(uint64_t features)
2330 atomic_or_64(&acpica_devcfg_features, features);
2333 void
2334 acpica_clear_devcfg_feature(uint64_t features)
2336 atomic_and_64(&acpica_devcfg_features, ~features);
2339 uint64_t
2340 acpica_get_devcfg_feature(uint64_t features)
2342 return (acpica_devcfg_features & features);
2345 void
2346 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2348 *gbl_FADT = &AcpiGbl_FADT;
2351 void
2352 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2354 if (pstates && AcpiGbl_FADT.PstateControl != 0)
2355 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2356 AcpiGbl_FADT.PstateControl);
2358 if (cstates && AcpiGbl_FADT.CstControl != 0)
2359 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2360 AcpiGbl_FADT.CstControl);
2363 uint32_t
2364 acpi_strtoul(const char *str, char **ep, int base)
2366 ulong_t v;
2368 if (ddi_strtoul(str, ep, base, &v) != 0 || v > ACPI_UINT32_MAX) {
2369 return (ACPI_UINT32_MAX);
2372 return ((uint32_t)v);