4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2016 Nexenta Systems, Inc.
24 * Copyright (c) 2017 by Delphix. All rights reserved.
27 * Copyright (c) 2010, Intel Corporation.
28 * All rights reserved.
32 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
33 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
34 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
35 * PSMI 1.5 extensions are supported in Solaris Nevada.
36 * PSMI 1.6 extensions are supported in Solaris Nevada.
37 * PSMI 1.7 extensions are supported in Solaris Nevada.
41 #include <sys/processor.h>
44 #include <sys/smp_impldefs.h>
46 #include <sys/acpi/acpi.h>
47 #include <sys/acpica.h>
48 #include <sys/psm_common.h>
50 #include <sys/apic_timer.h>
53 #include <sys/sunddi.h>
54 #include <sys/ddi_impldefs.h>
56 #include <sys/promif.h>
57 #include <sys/x86_archext.h>
58 #include <sys/cpc_impl.h>
59 #include <sys/uadmin.h>
60 #include <sys/panic.h>
61 #include <sys/debug.h>
62 #include <sys/archsystm.h>
64 #include <sys/machsystm.h>
65 #include <sys/cpuvar.h>
66 #include <sys/rm_platter.h>
67 #include <sys/privregs.h>
68 #include <sys/cyclic.h>
70 #include <sys/pci_intr_lib.h>
71 #include <sys/sunndi.h>
73 #include <sys/clock.h>
76 * Local Function Prototypes
78 static int apic_handle_defconf();
79 static int apic_parse_mpct(caddr_t mpct
, int bypass
);
80 static struct apic_mpfps_hdr
*apic_find_fps_sig(caddr_t fptr
, int size
);
81 static int apic_checksum(caddr_t bptr
, int len
);
82 static int apic_find_bus_type(char *bus
);
83 static int apic_find_bus(int busid
);
84 static struct apic_io_intr
*apic_find_io_intr(int irqno
);
85 static int apic_find_free_irq(int start
, int end
);
86 struct apic_io_intr
*apic_find_io_intr_w_busid(int irqno
, int busid
);
87 static void apic_set_pwroff_method_from_mpcnfhdr(struct apic_mp_cnf_hdr
*hdrp
);
88 static void apic_free_apic_cpus(void);
89 static boolean_t
apic_is_ioapic_AMD_813x(uint32_t physaddr
);
90 static int apic_acpi_enter_apicmode(void);
92 int apic_handle_pci_pci_bridge(dev_info_t
*idip
, int child_devno
,
93 int child_ipin
, struct apic_io_intr
**intrp
);
94 int apic_find_bus_id(int bustype
);
95 int apic_find_intin(uchar_t ioapic
, uchar_t intin
);
96 void apic_record_rdt_entry(apic_irq_t
*irqptr
, int irq
);
98 int apic_debug_mps_id
= 0; /* 1 - print MPS ID strings */
100 /* ACPI SCI interrupt configuration; -1 if SCI not used */
101 int apic_sci_vect
= -1;
102 iflag_t apic_sci_flags
;
104 /* ACPI HPET interrupt configuration; -1 if HPET not used */
105 int apic_hpet_vect
= -1;
106 iflag_t apic_hpet_flags
;
113 /* ACPI support routines */
114 static int acpi_probe(char *);
115 static int apic_acpi_irq_configure(acpi_psm_lnk_t
*acpipsmlnkp
, dev_info_t
*dip
,
116 int *pci_irqp
, iflag_t
*intr_flagp
);
118 int apic_acpi_translate_pci_irq(dev_info_t
*dip
, int busid
, int devid
,
119 int ipin
, int *pci_irqp
, iflag_t
*intr_flagp
);
120 uchar_t
acpi_find_ioapic(int irq
);
121 static int acpi_intr_compatible(iflag_t iflag1
, iflag_t iflag2
);
123 /* Max wait time (in repetitions) for flags to clear in an RDT entry. */
124 int apic_max_reps_clear_pending
= 1000;
126 int apic_intr_policy
= INTR_ROUND_ROBIN
;
128 int apic_next_bind_cpu
= 1; /* For round robin assignment */
129 /* start with cpu 1 */
132 * If enabled, the distribution works as follows:
133 * On every interrupt entry, the current ipl for the CPU is set in cpu_info
134 * and the irq corresponding to the ipl is also set in the aci_current array.
135 * interrupt exit and setspl (due to soft interrupts) will cause the current
136 * ipl to be be changed. This is cache friendly as these frequently used
137 * paths write into a per cpu structure.
139 * Sampling is done by checking the structures for all CPUs and incrementing
140 * the busy field of the irq (if any) executing on each CPU and the busy field
141 * of the corresponding CPU.
142 * In periodic mode this is done on every clock interrupt.
143 * In one-shot mode, this is done thru a cyclic with an interval of
144 * apic_redistribute_sample_interval (default 10 milli sec).
146 * Every apic_sample_factor_redistribution times we sample, we do computations
147 * to decide which interrupt needs to be migrated (see comments
148 * before apic_intr_redistribute().
152 * Following 3 variables start as % and can be patched or set using an
153 * API to be defined in future. They will be scaled to
154 * sample_factor_redistribution which is in turn set to hertz+1 (in periodic
155 * mode), or 101 in one-shot mode to stagger it away from one sec processing
158 int apic_int_busy_mark
= 60;
159 int apic_int_free_mark
= 20;
160 int apic_diff_for_redistribution
= 10;
162 /* sampling interval for interrupt redistribution for dynamic migration */
163 int apic_redistribute_sample_interval
= NANOSEC
/ 100; /* 10 millisec */
166 * number of times we sample before deciding to redistribute interrupts
167 * for dynamic migration
169 int apic_sample_factor_redistribution
= 101;
171 int apic_redist_cpu_skip
= 0;
172 int apic_num_imbalance
= 0;
173 int apic_num_rebind
= 0;
176 * Maximum number of APIC CPUs in the system, -1 indicates that dynamic
177 * allocation of CPU ids is disabled.
179 int apic_max_nproc
= -1;
181 size_t apic_cpus_size
= 0;
182 int apic_defconf
= 0;
183 int apic_irq_translate
= 0;
184 int apic_spec_rev
= 0;
187 int apic_use_acpi
= 1; /* 1 = use ACPI, 0 = don't use ACPI */
188 int apic_use_acpi_madt_only
= 0; /* 1=ONLY use MADT from ACPI */
191 * For interrupt link devices, if apic_unconditional_srs is set, an irq resource
192 * will be assigned (via _SRS). If it is not set, use the current
193 * irq setting (via _CRS), but only if that irq is in the set of possible
194 * irqs (returned by _PRS) for the device.
196 int apic_unconditional_srs
= 1;
199 * For interrupt link devices, if apic_prefer_crs is set when we are
200 * assigning an IRQ resource to a device, prefer the current IRQ setting
201 * over other possible irq settings under same conditions.
204 int apic_prefer_crs
= 1;
206 uchar_t apic_io_id
[MAX_IO_APIC
];
207 volatile uint32_t *apicioadr
[MAX_IO_APIC
];
208 uchar_t apic_io_ver
[MAX_IO_APIC
];
209 uchar_t apic_io_vectbase
[MAX_IO_APIC
];
210 uchar_t apic_io_vectend
[MAX_IO_APIC
];
211 uchar_t apic_reserved_irqlist
[MAX_ISA_IRQ
+ 1];
212 uint32_t apic_physaddr
[MAX_IO_APIC
];
214 boolean_t ioapic_mask_workaround
[MAX_IO_APIC
];
217 * First available slot to be used as IRQ index into the apic_irq_table
218 * for those interrupts (like MSI/X) that don't have a physical IRQ.
220 int apic_first_avail_irq
= APIC_FIRST_FREE_IRQ
;
223 * apic_ioapic_lock protects the ioapics (reg select), the status, temp_bound
224 * and bound elements of cpus_info and the temp_cpu element of irq_struct
226 lock_t apic_ioapic_lock
;
228 int apic_io_max
= 0; /* no. of i/o apics enabled */
230 struct apic_io_intr
*apic_io_intrp
= NULL
;
231 static struct apic_bus
*apic_busp
;
233 uchar_t apic_resv_vector
[MAXIPL
+1];
235 char apic_level_intr
[APIC_MAX_VECTOR
+1];
237 uint32_t eisa_level_intr_mask
= 0;
238 /* At least MSB will be set if EISA bus */
240 int apic_pci_bus_total
= 0;
241 uchar_t apic_single_pci_busid
= 0;
244 * airq_mutex protects additions to the apic_irq_table - the first
245 * pointer and any airq_nexts off of that one. It also protects
246 * apic_max_device_irq & apic_min_device_irq. It also guarantees
247 * that share_id is unique as new ids are generated only when new
248 * irq_t structs are linked in. Once linked in the structs are never
249 * deleted. temp_cpu & mps_intr_index field indicate if it is programmed
250 * or allocated. Note that there is a slight gap between allocating in
251 * apic_introp_xlate and programming in addspl.
254 apic_irq_t
*apic_irq_table
[APIC_MAX_VECTOR
+1];
255 int apic_max_device_irq
= 0;
256 int apic_min_device_irq
= APIC_MAX_VECTOR
;
258 typedef struct prs_irq_list_ent
{
262 acpi_prs_private_t prsprv
;
263 struct prs_irq_list_ent
*next
;
270 /* 1 = acpi is enabled & working, 0 = acpi is not enabled or not there */
271 int apic_enable_acpi
= 0;
273 /* ACPI Multiple APIC Description Table ptr */
274 static ACPI_TABLE_MADT
*acpi_mapic_dtp
= NULL
;
276 /* ACPI Interrupt Source Override Structure ptr */
277 ACPI_MADT_INTERRUPT_OVERRIDE
*acpi_isop
= NULL
;
278 int acpi_iso_cnt
= 0;
280 /* ACPI Non-maskable Interrupt Sources ptr */
281 static ACPI_MADT_NMI_SOURCE
*acpi_nmi_sp
= NULL
;
282 static int acpi_nmi_scnt
= 0;
283 static ACPI_MADT_LOCAL_APIC_NMI
*acpi_nmi_cp
= NULL
;
284 static int acpi_nmi_ccnt
= 0;
286 static boolean_t acpi_found_smp_config
= B_FALSE
;
289 * The following added to identify a software poweroff method if available.
294 char oem_id
[APIC_MPS_OEM_ID_LEN
+ 1]; /* MAX + 1 for NULL */
295 char prod_id
[APIC_MPS_PROD_ID_LEN
+ 1]; /* MAX + 1 for NULL */
297 { APIC_POWEROFF_VIA_RTC
, "INTEL", "ALDER" }, /* 4300 */
298 { APIC_POWEROFF_VIA_RTC
, "NCR", "AMC" }, /* 4300 */
299 { APIC_POWEROFF_VIA_ASPEN_BMC
, "INTEL", "A450NX" }, /* 4400? */
300 { APIC_POWEROFF_VIA_ASPEN_BMC
, "INTEL", "AD450NX" }, /* 4400 */
301 { APIC_POWEROFF_VIA_ASPEN_BMC
, "INTEL", "AC450NX" }, /* 4400R */
302 { APIC_POWEROFF_VIA_SITKA_BMC
, "INTEL", "S450NX" }, /* S50 */
303 { APIC_POWEROFF_VIA_SITKA_BMC
, "INTEL", "SC450NX" } /* S50? */
306 int apic_poweroff_method
= APIC_POWEROFF_NONE
;
309 * Auto-configuration routines
313 * Look at MPSpec 1.4 (Intel Order # 242016-005) for details of what we do here
314 * May work with 1.1 - but not guaranteed.
315 * According to the MP Spec, the MP floating pointer structure
316 * will be searched in the order described below:
317 * 1. In the first kilobyte of Extended BIOS Data Area (EBDA)
318 * 2. Within the last kilobyte of system base memory
319 * 3. In the BIOS ROM address space between 0F0000h and 0FFFFh
320 * Once we find the right signature with proper checksum, we call
321 * either handle_defconf or parse_mpct to get all info necessary for
322 * subsequent operations.
325 apic_probe_common(char *modname
)
327 uint32_t mpct_addr
, ebda_start
= 0, base_mem_end
;
331 int i
, mpct_size
, mapsize
, retval
= PSM_FAILURE
;
332 ushort_t ebda_seg
, base_mem_size
;
333 struct apic_mpfps_hdr
*fpsp
;
334 struct apic_mp_cnf_hdr
*hdrp
;
335 int bypass_cpu_and_ioapics_in_mptables
;
336 int acpi_user_options
;
338 if (apic_forceload
< 0)
342 * Remember who we are
346 /* Allow override for MADT-only mode */
347 acpi_user_options
= ddi_prop_get_int(DDI_DEV_T_ANY
, ddi_root_node(), 0,
348 "acpi-user-options", 0);
349 apic_use_acpi_madt_only
= ((acpi_user_options
& ACPI_OUSER_MADT
) != 0);
351 /* Allow apic_use_acpi to override MADT-only mode */
353 apic_use_acpi_madt_only
= 0;
355 retval
= acpi_probe(modname
);
358 * mapin the bios data area 40:0
359 * 40:13h - two-byte location reports the base memory size
360 * 40:0Eh - two-byte location for the exact starting address of
361 * the EBDA segment for EISA
363 biosdatap
= psm_map_phys(0x400, 0x20, PROT_READ
);
367 mapsize
= MPFPS_RAM_WIN_LEN
;
368 /*LINTED: pointer cast may result in improper alignment */
369 ebda_seg
= *((ushort_t
*)(biosdatap
+0xe));
370 /* check the 1k of EBDA */
372 ebda_start
= ((uint32_t)ebda_seg
) << 4;
373 fptr
= psm_map_phys(ebda_start
, MPFPS_RAM_WIN_LEN
, PROT_READ
);
376 apic_find_fps_sig(fptr
, MPFPS_RAM_WIN_LEN
)))
377 psm_unmap_phys(fptr
, MPFPS_RAM_WIN_LEN
);
380 /* If not in EBDA, check the last k of system base memory */
382 /*LINTED: pointer cast may result in improper alignment */
383 base_mem_size
= *((ushort_t
*)(biosdatap
+ 0x13));
385 if (base_mem_size
> 512)
386 base_mem_end
= 639 * 1024;
388 base_mem_end
= 511 * 1024;
389 /* if ebda == last k of base mem, skip to check BIOS ROM */
390 if (base_mem_end
!= ebda_start
) {
392 fptr
= psm_map_phys(base_mem_end
, MPFPS_RAM_WIN_LEN
,
396 if (!(fpsp
= apic_find_fps_sig(fptr
,
398 psm_unmap_phys(fptr
, MPFPS_RAM_WIN_LEN
);
402 psm_unmap_phys(biosdatap
, 0x20);
404 /* If still cannot find it, check the BIOS ROM space */
406 mapsize
= MPFPS_ROM_WIN_LEN
;
407 fptr
= psm_map_phys(MPFPS_ROM_WIN_START
,
408 MPFPS_ROM_WIN_LEN
, PROT_READ
);
411 apic_find_fps_sig(fptr
, MPFPS_ROM_WIN_LEN
))) {
412 psm_unmap_phys(fptr
, MPFPS_ROM_WIN_LEN
);
418 if (apic_checksum((caddr_t
)fpsp
, fpsp
->mpfps_length
* 16) != 0) {
419 psm_unmap_phys(fptr
, MPFPS_ROM_WIN_LEN
);
423 apic_spec_rev
= fpsp
->mpfps_spec_rev
;
424 if ((apic_spec_rev
!= 04) && (apic_spec_rev
!= 01)) {
425 psm_unmap_phys(fptr
, MPFPS_ROM_WIN_LEN
);
429 /* check IMCR is present or not */
430 apic_imcrp
= fpsp
->mpfps_featinfo2
& MPFPS_FEATINFO2_IMCRP
;
432 /* check default configuration (dual CPUs) */
433 if ((apic_defconf
= fpsp
->mpfps_featinfo1
) != 0) {
434 psm_unmap_phys(fptr
, mapsize
);
435 if ((retval
= apic_handle_defconf()) != PSM_SUCCESS
)
441 /* MP Configuration Table */
442 mpct_addr
= (uint32_t)(fpsp
->mpfps_mpct_paddr
);
444 psm_unmap_phys(fptr
, mapsize
); /* unmap floating ptr struct */
447 * Map in enough memory for the MP Configuration Table Header.
448 * Use this table to read the total length of the BIOS data and
449 * map in all the info
451 /*LINTED: pointer cast may result in improper alignment */
452 hdrp
= (struct apic_mp_cnf_hdr
*)psm_map_phys(mpct_addr
,
453 sizeof (struct apic_mp_cnf_hdr
), PROT_READ
);
457 /* check mp configuration table signature PCMP */
458 if (hdrp
->mpcnf_sig
!= 0x504d4350) {
459 psm_unmap_phys((caddr_t
)hdrp
, sizeof (struct apic_mp_cnf_hdr
));
462 mpct_size
= (int)hdrp
->mpcnf_tbl_length
;
464 apic_set_pwroff_method_from_mpcnfhdr(hdrp
);
466 psm_unmap_phys((caddr_t
)hdrp
, sizeof (struct apic_mp_cnf_hdr
));
468 if ((retval
== PSM_SUCCESS
) && !apic_use_acpi_madt_only
) {
469 /* This is an ACPI machine No need for further checks */
474 * Map in the entries for this machine, ie. Processor
475 * Entry Tables, Bus Entry Tables, etc.
476 * They are in fixed order following one another
478 mpct
= psm_map_phys(mpct_addr
, mpct_size
, PROT_READ
);
482 if (apic_checksum(mpct
, mpct_size
) != 0)
485 /*LINTED: pointer cast may result in improper alignment */
486 hdrp
= (struct apic_mp_cnf_hdr
*)mpct
;
487 apicadr
= (uint32_t *)mapin_apic((uint32_t)hdrp
->mpcnf_local_apic
,
488 APIC_LOCAL_MEMLEN
, PROT_READ
| PROT_WRITE
);
492 /* Parse all information in the tables */
493 bypass_cpu_and_ioapics_in_mptables
= (retval
== PSM_SUCCESS
);
494 if (apic_parse_mpct(mpct
, bypass_cpu_and_ioapics_in_mptables
) ==
496 retval
= PSM_SUCCESS
;
501 psm_unmap_phys(mpct
, mpct_size
);
505 if (retval
== PSM_SUCCESS
) {
506 extern int apic_ioapic_method_probe();
508 if ((retval
= apic_ioapic_method_probe()) == PSM_SUCCESS
)
509 return (PSM_SUCCESS
);
512 for (i
= 0; i
< apic_io_max
; i
++)
513 mapout_ioapic((caddr_t
)apicioadr
[i
], APIC_IO_MEMLEN
);
515 kmem_free(apic_cpus
, apic_cpus_size
);
519 mapout_apic((caddr_t
)apicadr
, APIC_LOCAL_MEMLEN
);
523 psm_unmap_phys(mpct
, mpct_size
);
529 apic_set_pwroff_method_from_mpcnfhdr(struct apic_mp_cnf_hdr
*hdrp
)
533 for (i
= 0; i
< (sizeof (apic_mps_ids
) / sizeof (apic_mps_ids
[0]));
535 if ((strncmp(hdrp
->mpcnf_oem_str
, apic_mps_ids
[i
].oem_id
,
536 strlen(apic_mps_ids
[i
].oem_id
)) == 0) &&
537 (strncmp(hdrp
->mpcnf_prod_str
, apic_mps_ids
[i
].prod_id
,
538 strlen(apic_mps_ids
[i
].prod_id
)) == 0)) {
540 apic_poweroff_method
= apic_mps_ids
[i
].poweroff_method
;
545 if (apic_debug_mps_id
!= 0) {
546 cmn_err(CE_CONT
, "%s: MPS OEM ID = '%c%c%c%c%c%c%c%c'"
547 "Product ID = '%c%c%c%c%c%c%c%c%c%c%c%c'\n",
549 hdrp
->mpcnf_oem_str
[0],
550 hdrp
->mpcnf_oem_str
[1],
551 hdrp
->mpcnf_oem_str
[2],
552 hdrp
->mpcnf_oem_str
[3],
553 hdrp
->mpcnf_oem_str
[4],
554 hdrp
->mpcnf_oem_str
[5],
555 hdrp
->mpcnf_oem_str
[6],
556 hdrp
->mpcnf_oem_str
[7],
557 hdrp
->mpcnf_prod_str
[0],
558 hdrp
->mpcnf_prod_str
[1],
559 hdrp
->mpcnf_prod_str
[2],
560 hdrp
->mpcnf_prod_str
[3],
561 hdrp
->mpcnf_prod_str
[4],
562 hdrp
->mpcnf_prod_str
[5],
563 hdrp
->mpcnf_prod_str
[6],
564 hdrp
->mpcnf_prod_str
[7],
565 hdrp
->mpcnf_prod_str
[8],
566 hdrp
->mpcnf_prod_str
[9],
567 hdrp
->mpcnf_prod_str
[10],
568 hdrp
->mpcnf_prod_str
[11]);
573 apic_free_apic_cpus(void)
575 if (apic_cpus
!= NULL
) {
576 kmem_free(apic_cpus
, apic_cpus_size
);
583 acpi_probe(char *modname
)
585 int i
, intmax
, index
;
587 int acpi_verboseflags
= 0;
588 int madt_seen
, madt_size
;
589 ACPI_SUBTABLE_HEADER
*ap
;
590 ACPI_MADT_LOCAL_APIC
*mpa
;
591 ACPI_MADT_LOCAL_X2APIC
*mpx2a
;
592 ACPI_MADT_IO_APIC
*mia
;
593 ACPI_MADT_IO_SAPIC
*misa
;
594 ACPI_MADT_INTERRUPT_OVERRIDE
*mio
;
595 ACPI_MADT_NMI_SOURCE
*mns
;
596 ACPI_MADT_INTERRUPT_SOURCE
*mis
;
597 ACPI_MADT_LOCAL_APIC_NMI
*mlan
;
598 ACPI_MADT_LOCAL_X2APIC_NMI
*mx2alan
;
599 ACPI_MADT_LOCAL_APIC_OVERRIDE
*mao
;
602 volatile uint32_t *ioapic
;
610 return (PSM_FAILURE
);
612 if (AcpiGetTable(ACPI_SIG_MADT
, 1,
613 (ACPI_TABLE_HEADER
**) &acpi_mapic_dtp
) != AE_OK
) {
614 cmn_err(CE_WARN
, "!acpi_probe: No MADT found!");
615 return (PSM_FAILURE
);
618 apicadr
= mapin_apic((uint32_t)acpi_mapic_dtp
->Address
,
619 APIC_LOCAL_MEMLEN
, PROT_READ
| PROT_WRITE
);
621 return (PSM_FAILURE
);
623 if ((local_ids
= kmem_zalloc(NCPU
* sizeof (uint32_t),
624 KM_NOSLEEP
)) == NULL
)
625 return (PSM_FAILURE
);
627 if ((proc_ids
= kmem_zalloc(NCPU
* sizeof (uint32_t),
628 KM_NOSLEEP
)) == NULL
) {
629 kmem_free(local_ids
, NCPU
* sizeof (uint32_t));
630 return (PSM_FAILURE
);
633 id
= apic_reg_ops
->apic_read(APIC_LID_REG
);
634 local_ids
[0] = (uchar_t
)(id
>> 24);
635 apic_nproc
= index
= 1;
638 ap
= (ACPI_SUBTABLE_HEADER
*) (acpi_mapic_dtp
+ 1);
639 madt_size
= acpi_mapic_dtp
->Header
.Length
;
640 madt_seen
= sizeof (*acpi_mapic_dtp
);
642 while (madt_seen
< madt_size
) {
644 case ACPI_MADT_TYPE_LOCAL_APIC
:
645 mpa
= (ACPI_MADT_LOCAL_APIC
*) ap
;
646 if (mpa
->LapicFlags
& ACPI_MADT_ENABLED
) {
647 if (mpa
->Id
== 255) {
648 cmn_err(CE_WARN
, "!%s: encountered "
649 "invalid entry in MADT: CPU %d "
650 "has Local APIC Id equal to 255 ",
651 psm_name
, mpa
->ProcessorId
);
653 if (mpa
->Id
== local_ids
[0]) {
655 proc_ids
[0] = mpa
->ProcessorId
;
656 } else if (apic_nproc
< NCPU
&& use_mp
&&
657 apic_nproc
< boot_ncpus
) {
658 local_ids
[index
] = mpa
->Id
;
659 proc_ids
[index
] = mpa
->ProcessorId
;
662 } else if (apic_nproc
== NCPU
&& !warned
) {
663 cmn_err(CE_WARN
, "%s: CPU limit "
665 #if !defined(__amd64)
668 "; Solaris will use %d CPUs.",
675 case ACPI_MADT_TYPE_IO_APIC
:
676 mia
= (ACPI_MADT_IO_APIC
*) ap
;
677 if (apic_io_max
< MAX_IO_APIC
) {
678 ioapic_ix
= apic_io_max
;
679 apic_io_id
[apic_io_max
] = mia
->Id
;
680 apic_io_vectbase
[apic_io_max
] =
682 apic_physaddr
[apic_io_max
] =
683 (uint32_t)mia
->Address
;
684 ioapic
= apicioadr
[apic_io_max
] =
685 mapin_ioapic((uint32_t)mia
->Address
,
686 APIC_IO_MEMLEN
, PROT_READ
| PROT_WRITE
);
689 ioapic_mask_workaround
[apic_io_max
] =
690 apic_is_ioapic_AMD_813x(mia
->Address
);
695 case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE
:
696 mio
= (ACPI_MADT_INTERRUPT_OVERRIDE
*) ap
;
697 if (acpi_isop
== NULL
)
702 case ACPI_MADT_TYPE_NMI_SOURCE
:
704 mns
= (ACPI_MADT_NMI_SOURCE
*) ap
;
705 if (acpi_nmi_sp
== NULL
)
709 cmn_err(CE_NOTE
, "!apic: nmi source: %d 0x%x\n",
710 mns
->GlobalIrq
, mns
->IntiFlags
);
713 case ACPI_MADT_TYPE_LOCAL_APIC_NMI
:
715 mlan
= (ACPI_MADT_LOCAL_APIC_NMI
*) ap
;
716 if (acpi_nmi_cp
== NULL
)
720 cmn_err(CE_NOTE
, "!apic: local nmi: %d 0x%x %d\n",
721 mlan
->ProcessorId
, mlan
->IntiFlags
,
725 case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE
:
727 mao
= (ACPI_MADT_LOCAL_APIC_OVERRIDE
*) ap
;
728 cmn_err(CE_NOTE
, "!apic: address override: %lx\n",
732 case ACPI_MADT_TYPE_IO_SAPIC
:
734 misa
= (ACPI_MADT_IO_SAPIC
*) ap
;
736 cmn_err(CE_NOTE
, "!apic: io sapic: %d %d %lx\n",
737 misa
->Id
, misa
->GlobalIrqBase
,
738 (long)misa
->Address
);
741 case ACPI_MADT_TYPE_INTERRUPT_SOURCE
:
743 mis
= (ACPI_MADT_INTERRUPT_SOURCE
*) ap
;
746 "!apic: irq source: %d %d %d 0x%x %d %d\n",
747 mis
->Id
, mis
->Eid
, mis
->GlobalIrq
,
748 mis
->IntiFlags
, mis
->Type
,
752 case ACPI_MADT_TYPE_LOCAL_X2APIC
:
753 mpx2a
= (ACPI_MADT_LOCAL_X2APIC
*) ap
;
756 * All logical processors with APIC ID values
757 * of 255 and greater will have their APIC
758 * reported through Processor X2APIC structure.
759 * All logical processors with APIC ID less than
760 * 255 will have their APIC reported through
761 * Processor Local APIC.
763 * Some systems apparently don't care and report all
764 * processors through Processor X2APIC structures. We
765 * warn about that but don't ignore those CPUs.
767 if (mpx2a
->LocalApicId
< 255) {
768 cmn_err(CE_WARN
, "!%s: ignoring invalid entry "
769 "in MADT: CPU %d has X2APIC Id %d (< 255)",
770 psm_name
, mpx2a
->Uid
, mpx2a
->LocalApicId
);
772 if (mpx2a
->LapicFlags
& ACPI_MADT_ENABLED
) {
773 if (mpx2a
->LocalApicId
== local_ids
[0]) {
775 proc_ids
[0] = mpx2a
->Uid
;
776 } else if (apic_nproc
< NCPU
&& use_mp
&&
777 apic_nproc
< boot_ncpus
) {
778 local_ids
[index
] = mpx2a
->LocalApicId
;
779 proc_ids
[index
] = mpx2a
->Uid
;
782 } else if (apic_nproc
== NCPU
&& !warned
) {
783 cmn_err(CE_WARN
, "%s: CPU limit "
785 #if !defined(__amd64)
788 "; Solaris will use %d CPUs.",
796 case ACPI_MADT_TYPE_LOCAL_X2APIC_NMI
:
798 mx2alan
= (ACPI_MADT_LOCAL_X2APIC_NMI
*) ap
;
799 if (mx2alan
->Uid
>> 8)
804 "!apic: local x2apic nmi: %d 0x%x %d\n",
805 mx2alan
->Uid
, mx2alan
->IntiFlags
, mx2alan
->Lint
);
810 case ACPI_MADT_TYPE_RESERVED
:
815 /* advance to next entry */
816 madt_seen
+= ap
->Length
;
817 ap
= (ACPI_SUBTABLE_HEADER
*)(((char *)ap
) + ap
->Length
);
820 /* We found multiple enabled cpus via MADT */
821 if ((apic_nproc
> 1) && (apic_io_max
> 0)) {
822 acpi_found_smp_config
= B_TRUE
;
824 "!apic: Using ACPI (MADT) for SMP configuration");
828 * allocate enough space for possible hot-adding of CPUs.
829 * max_ncpus may be less than apic_nproc if it's set by user.
831 if (plat_dr_support_cpu()) {
832 apic_max_nproc
= max_ncpus
;
834 apic_cpus_size
= max(apic_nproc
, max_ncpus
) * sizeof (*apic_cpus
);
835 if ((apic_cpus
= kmem_zalloc(apic_cpus_size
, KM_NOSLEEP
)) == NULL
)
839 * ACPI doesn't provide the local apic ver, get it directly from the
842 ver
= apic_reg_ops
->apic_read(APIC_VERS_REG
);
843 for (i
= 0; i
< apic_nproc
; i
++) {
844 apic_cpus
[i
].aci_local_id
= local_ids
[i
];
845 apic_cpus
[i
].aci_local_ver
= (uchar_t
)(ver
& 0xFF);
846 apic_cpus
[i
].aci_processor_id
= proc_ids
[i
];
847 /* Only build mapping info for CPUs present at boot. */
849 (void) acpica_map_cpu(i
, proc_ids
[i
]);
853 * To support CPU dynamic reconfiguration, the apic CPU info structure
854 * for each possible CPU will be pre-allocated at boot time.
855 * The state for each apic CPU info structure will be assigned according
856 * to the following rules:
858 * Slot index range: [0, min(apic_nproc, boot_ncpus))
860 * Note: cpu exists and will be configured/enabled at boot time
862 * Slot index range: [boot_ncpus, apic_nproc)
863 * State flags: APIC_CPU_FREE | APIC_CPU_DIRTY
864 * Note: cpu exists but won't be configured/enabled at boot time
866 * Slot index range: [apic_nproc, boot_ncpus)
867 * State flags: APIC_CPU_FREE
868 * Note: cpu doesn't exist at boot time
870 * Slot index range: [max(apic_nproc, boot_ncpus), max_ncpus)
871 * State flags: APIC_CPU_FREE
872 * Note: cpu doesn't exist at boot time
874 CPUSET_ZERO(apic_cpumask
);
875 for (i
= 0; i
< min(boot_ncpus
, apic_nproc
); i
++) {
876 CPUSET_ADD(apic_cpumask
, i
);
877 apic_cpus
[i
].aci_status
= 0;
879 for (i
= boot_ncpus
; i
< apic_nproc
; i
++) {
880 apic_cpus
[i
].aci_status
= APIC_CPU_FREE
| APIC_CPU_DIRTY
;
882 for (i
= apic_nproc
; i
< boot_ncpus
; i
++) {
883 apic_cpus
[i
].aci_status
= APIC_CPU_FREE
;
885 for (i
= max(boot_ncpus
, apic_nproc
); i
< max_ncpus
; i
++) {
886 apic_cpus
[i
].aci_status
= APIC_CPU_FREE
;
889 for (i
= 0; i
< apic_io_max
; i
++) {
893 * need to check Sitka on the following acpi problem
894 * On the Sitka, the ioapic's apic_id field isn't reporting
895 * the actual io apic id. We have reported this problem
896 * to Intel. Until they fix the problem, we will get the
897 * actual id directly from the ioapic.
899 id
= ioapic_read(ioapic_ix
, APIC_ID_CMD
);
900 hid
= (uchar_t
)(id
>> 24);
902 if (hid
!= apic_io_id
[i
]) {
903 if (apic_io_id
[i
] == 0)
905 else { /* set ioapic id to whatever reported by ACPI */
906 id
= ((uint32_t)apic_io_id
[i
]) << 24;
907 ioapic_write(ioapic_ix
, APIC_ID_CMD
, id
);
910 ver
= ioapic_read(ioapic_ix
, APIC_VERS_CMD
);
911 apic_io_ver
[i
] = (uchar_t
)(ver
& 0xff);
912 intmax
= (ver
>> 16) & 0xff;
913 apic_io_vectend
[i
] = apic_io_vectbase
[i
] + intmax
;
914 if (apic_first_avail_irq
<= apic_io_vectend
[i
])
915 apic_first_avail_irq
= apic_io_vectend
[i
] + 1;
920 * Process SCI configuration here
921 * An error may be returned here if
922 * acpi-user-options specifies legacy mode
923 * (no SCI, no ACPI mode)
925 if (acpica_get_sci(&sci
, &sci_flags
) != AE_OK
)
929 * Now call acpi_init() to generate namespaces
930 * If this fails, we don't attempt to use ACPI
931 * even if we were able to get a MADT above
933 if (acpica_init() != AE_OK
) {
934 cmn_err(CE_WARN
, "!apic: Failed to initialize acpica!");
939 * Call acpica_build_processor_map() now that we have
940 * ACPI namesspace access
942 (void) acpica_build_processor_map();
945 * Squirrel away the SCI and flags for later on
946 * in apic_picinit() when we're ready
949 apic_sci_flags
= sci_flags
;
951 if (apic_verbose
& APIC_VERBOSE_IRQ_FLAG
)
952 acpi_verboseflags
|= PSM_VERBOSE_IRQ_FLAG
;
954 if (apic_verbose
& APIC_VERBOSE_POWEROFF_FLAG
)
955 acpi_verboseflags
|= PSM_VERBOSE_POWEROFF_FLAG
;
957 if (apic_verbose
& APIC_VERBOSE_POWEROFF_PAUSE_FLAG
)
958 acpi_verboseflags
|= PSM_VERBOSE_POWEROFF_PAUSE_FLAG
;
960 if (acpi_psm_init(modname
, acpi_verboseflags
) == ACPI_PSM_FAILURE
)
963 /* Enable ACPI APIC interrupt routing */
964 if (apic_acpi_enter_apicmode() != PSM_FAILURE
) {
965 cmn_err(CE_NOTE
, "!apic: Using APIC interrupt routing mode");
966 build_reserved_irqlist((uchar_t
*)apic_reserved_irqlist
);
967 apic_enable_acpi
= 1;
968 if (apic_sci_vect
> 0) {
969 acpica_set_core_feature(ACPI_FEATURE_SCI_EVENT
);
971 if (apic_use_acpi_madt_only
) {
973 "?Using ACPI for CPU/IOAPIC information ONLY\n");
977 * probe ACPI for hpet information here which is used later
980 if (hpet_acpi_init(&apic_hpet_vect
, &apic_hpet_flags
) < 0) {
981 cmn_err(CE_NOTE
, "!ACPI HPET table query failed\n");
984 kmem_free(local_ids
, NCPU
* sizeof (uint32_t));
985 kmem_free(proc_ids
, NCPU
* sizeof (uint32_t));
986 return (PSM_SUCCESS
);
988 /* if setting APIC mode failed above, we fall through to cleanup */
991 cmn_err(CE_WARN
, "!apic: Failed acpi_probe, SMP config was %s",
992 acpi_found_smp_config
? "found" : "not found");
993 apic_free_apic_cpus();
994 if (apicadr
!= NULL
) {
995 mapout_apic((caddr_t
)apicadr
, APIC_LOCAL_MEMLEN
);
1000 for (i
= 0; i
< apic_io_max
; i
++) {
1001 mapout_ioapic((caddr_t
)apicioadr
[i
], APIC_IO_MEMLEN
);
1002 apicioadr
[i
] = NULL
;
1011 acpi_found_smp_config
= B_FALSE
;
1012 kmem_free(local_ids
, NCPU
* sizeof (uint32_t));
1013 kmem_free(proc_ids
, NCPU
* sizeof (uint32_t));
1014 return (PSM_FAILURE
);
1018 * Handle default configuration. Fill in reqd global variables & tables
1019 * Fill all details as MP table does not give any more info
1022 apic_handle_defconf()
1026 /* Failed to probe ACPI MADT tables, disable CPU DR. */
1027 apic_max_nproc
= -1;
1028 apic_free_apic_cpus();
1029 plat_dr_disable_cpu();
1031 apicioadr
[0] = (void *)mapin_ioapic(APIC_IO_ADDR
,
1032 APIC_IO_MEMLEN
, PROT_READ
| PROT_WRITE
);
1033 apicadr
= (void *)psm_map_phys(APIC_LOCAL_ADDR
,
1034 APIC_LOCAL_MEMLEN
, PROT_READ
);
1035 apic_cpus_size
= 2 * sizeof (*apic_cpus
);
1036 apic_cpus
= (apic_cpus_info_t
*)
1037 kmem_zalloc(apic_cpus_size
, KM_NOSLEEP
);
1038 if ((!apicadr
) || (!apicioadr
[0]) || (!apic_cpus
))
1039 goto apic_handle_defconf_fail
;
1040 CPUSET_ONLY(apic_cpumask
, 0);
1041 CPUSET_ADD(apic_cpumask
, 1);
1043 lid
= apic_reg_ops
->apic_read(APIC_LID_REG
);
1044 apic_cpus
[0].aci_local_id
= (uchar_t
)(lid
>> APIC_ID_BIT_OFFSET
);
1046 * According to the PC+MP spec 1.1, the local ids
1047 * for the default configuration has to be 0 or 1
1049 if (apic_cpus
[0].aci_local_id
== 1)
1050 apic_cpus
[1].aci_local_id
= 0;
1051 else if (apic_cpus
[0].aci_local_id
== 0)
1052 apic_cpus
[1].aci_local_id
= 1;
1054 goto apic_handle_defconf_fail
;
1058 if (apic_defconf
>= 5) {
1059 apic_cpus
[0].aci_local_ver
= APIC_INTEGRATED_VERS
;
1060 apic_cpus
[1].aci_local_ver
= APIC_INTEGRATED_VERS
;
1061 apic_io_ver
[0] = APIC_INTEGRATED_VERS
;
1063 apic_cpus
[0].aci_local_ver
= 0; /* 82489 DX */
1064 apic_cpus
[1].aci_local_ver
= 0;
1067 if (apic_defconf
== 2 || apic_defconf
== 3 || apic_defconf
== 6)
1068 eisa_level_intr_mask
= (inb(EISA_LEVEL_CNTL
+ 1) << 8) |
1069 inb(EISA_LEVEL_CNTL
) | ((uint_t
)INT32_MAX
+ 1);
1070 return (PSM_SUCCESS
);
1072 apic_handle_defconf_fail
:
1074 mapout_apic((caddr_t
)apicadr
, APIC_LOCAL_MEMLEN
);
1076 mapout_ioapic((caddr_t
)apicioadr
[0], APIC_IO_MEMLEN
);
1077 return (PSM_FAILURE
);
1080 /* Parse the entries in MP configuration table and collect info that we need */
1082 apic_parse_mpct(caddr_t mpct
, int bypass_cpus_and_ioapics
)
1084 struct apic_procent
*procp
;
1085 struct apic_bus
*busp
;
1086 struct apic_io_entry
*ioapicp
;
1087 struct apic_io_intr
*intrp
;
1094 /*LINTED: pointer cast may result in improper alignment */
1095 procp
= (struct apic_procent
*)(mpct
+ sizeof (struct apic_mp_cnf_hdr
));
1097 /* No need to count cpu entries if we won't use them */
1098 if (!bypass_cpus_and_ioapics
) {
1100 /* Find max # of CPUS and allocate structure accordingly */
1102 CPUSET_ZERO(apic_cpumask
);
1103 while (procp
->proc_entry
== APIC_CPU_ENTRY
) {
1104 if (procp
->proc_cpuflags
& CPUFLAGS_EN
) {
1105 if (apic_nproc
< NCPU
&& use_mp
&&
1106 apic_nproc
< boot_ncpus
) {
1107 CPUSET_ADD(apic_cpumask
, apic_nproc
);
1109 } else if (apic_nproc
== NCPU
&& !warned
) {
1110 cmn_err(CE_WARN
, "%s: CPU limit "
1112 #if !defined(__amd64)
1115 "; Solaris will use %d CPUs.",
1123 apic_cpus_size
= apic_nproc
* sizeof (*apic_cpus
);
1124 if (!apic_nproc
|| !(apic_cpus
= (apic_cpus_info_t
*)
1125 kmem_zalloc(apic_cpus_size
, KM_NOSLEEP
)))
1126 return (PSM_FAILURE
);
1129 /*LINTED: pointer cast may result in improper alignment */
1130 procp
= (struct apic_procent
*)(mpct
+ sizeof (struct apic_mp_cnf_hdr
));
1133 * start with index 1 as 0 needs to be filled in with Boot CPU, but
1134 * if we're bypassing this information, it has already been filled
1135 * in by acpi_probe(), so don't overwrite it.
1137 if (!bypass_cpus_and_ioapics
)
1140 while (procp
->proc_entry
== APIC_CPU_ENTRY
) {
1141 /* check whether the cpu exists or not */
1142 if (!bypass_cpus_and_ioapics
&&
1143 procp
->proc_cpuflags
& CPUFLAGS_EN
) {
1144 if (procp
->proc_cpuflags
& CPUFLAGS_BP
) { /* Boot CPU */
1145 lid
= apic_reg_ops
->apic_read(APIC_LID_REG
);
1146 apic_cpus
[0].aci_local_id
= procp
->proc_apicid
;
1147 if (apic_cpus
[0].aci_local_id
!=
1148 (uchar_t
)(lid
>> APIC_ID_BIT_OFFSET
)) {
1149 return (PSM_FAILURE
);
1151 apic_cpus
[0].aci_local_ver
=
1152 procp
->proc_version
;
1153 } else if (apic_nproc
< NCPU
&& use_mp
&&
1154 apic_nproc
< boot_ncpus
) {
1155 apic_cpus
[apic_nproc
].aci_local_id
=
1158 apic_cpus
[apic_nproc
].aci_local_ver
=
1159 procp
->proc_version
;
1168 * Save start of bus entries for later use.
1169 * Get EISA level cntrl if EISA bus is present.
1170 * Also get the CPI bus id for single CPI bus case
1172 apic_busp
= busp
= (struct apic_bus
*)procp
;
1173 while (busp
->bus_entry
== APIC_BUS_ENTRY
) {
1174 lid
= apic_find_bus_type((char *)&busp
->bus_str1
);
1175 if (lid
== BUS_EISA
) {
1176 eisa_level_intr_mask
= (inb(EISA_LEVEL_CNTL
+ 1) << 8) |
1177 inb(EISA_LEVEL_CNTL
) | ((uint_t
)INT32_MAX
+ 1);
1178 } else if (lid
== BUS_PCI
) {
1180 * apic_single_pci_busid will be used only if
1181 * apic_pic_bus_total is equal to 1
1183 apic_pci_bus_total
++;
1184 apic_single_pci_busid
= busp
->bus_id
;
1189 ioapicp
= (struct apic_io_entry
*)busp
;
1191 if (!bypass_cpus_and_ioapics
)
1194 if (!bypass_cpus_and_ioapics
&& apic_io_max
< MAX_IO_APIC
) {
1195 if (ioapicp
->io_flags
& IOAPIC_FLAGS_EN
) {
1196 apic_io_id
[apic_io_max
] = ioapicp
->io_apicid
;
1197 apic_io_ver
[apic_io_max
] = ioapicp
->io_version
;
1198 apicioadr
[apic_io_max
] =
1199 (void *)mapin_ioapic(
1200 (uint32_t)ioapicp
->io_apic_addr
,
1201 APIC_IO_MEMLEN
, PROT_READ
| PROT_WRITE
);
1203 if (!apicioadr
[apic_io_max
])
1204 return (PSM_FAILURE
);
1206 ioapic_mask_workaround
[apic_io_max
] =
1207 apic_is_ioapic_AMD_813x(
1208 ioapicp
->io_apic_addr
);
1210 ioapic_ix
= apic_io_max
;
1211 id
= ioapic_read(ioapic_ix
, APIC_ID_CMD
);
1212 hid
= (uchar_t
)(id
>> 24);
1214 if (hid
!= apic_io_id
[apic_io_max
]) {
1215 if (apic_io_id
[apic_io_max
] == 0)
1216 apic_io_id
[apic_io_max
] = hid
;
1219 * set ioapic id to whatever
1222 * may not need to set index
1224 * take it out and try
1228 apic_io_id
[apic_io_max
]) <<
1231 ioapic_write(ioapic_ix
,
1239 } while (ioapicp
->io_entry
== APIC_IO_ENTRY
);
1241 apic_io_intrp
= (struct apic_io_intr
*)ioapicp
;
1243 intrp
= apic_io_intrp
;
1244 while (intrp
->intr_entry
== APIC_IO_INTR_ENTRY
) {
1245 if ((intrp
->intr_irq
> APIC_MAX_ISA_IRQ
) ||
1246 (apic_find_bus(intrp
->intr_busid
) == BUS_PCI
)) {
1247 apic_irq_translate
= 1;
1253 return (PSM_SUCCESS
);
1257 apic_cpu_in_range(int cpu
)
1259 cpu
&= ~IRQ_USER_BOUND
;
1260 /* Check whether cpu id is in valid range. */
1261 if (cpu
< 0 || cpu
>= apic_nproc
) {
1263 } else if (apic_max_nproc
!= -1 && cpu
>= apic_max_nproc
) {
1265 * Check whether cpuid is in valid range if CPU DR is enabled.
1268 } else if (!CPU_IN_SET(apic_cpumask
, cpu
)) {
1276 apic_get_next_bind_cpu(void)
1279 processorid_t cpuid
= 0;
1281 for (count
= 0; count
< apic_nproc
; count
++) {
1282 if (apic_next_bind_cpu
>= apic_nproc
) {
1283 apic_next_bind_cpu
= 0;
1285 i
= apic_next_bind_cpu
++;
1286 if (apic_cpu_in_range(i
)) {
1296 apic_get_apic_version()
1299 uchar_t min_io_apic_ver
= 0;
1300 static uint16_t version
; /* Cache as value is constant */
1301 static boolean_t found
= B_FALSE
; /* Accomodate zero version */
1303 if (found
== B_FALSE
) {
1307 * Don't assume all IO APICs in the system are the same.
1309 * Set to the minimum version.
1311 for (i
= 0; i
< apic_io_max
; i
++) {
1312 if ((apic_io_ver
[i
] != 0) &&
1313 ((min_io_apic_ver
== 0) ||
1314 (min_io_apic_ver
>= apic_io_ver
[i
])))
1315 min_io_apic_ver
= apic_io_ver
[i
];
1318 /* Assume all local APICs are of the same version. */
1319 version
= (min_io_apic_ver
<< 8) | apic_cpus
[0].aci_local_ver
;
1324 static struct apic_mpfps_hdr
*
1325 apic_find_fps_sig(caddr_t cptr
, int len
)
1329 /* Look for the pattern "_MP_" */
1330 for (i
= 0; i
< len
; i
+= 16) {
1331 if ((*(cptr
+i
) == '_') &&
1332 (*(cptr
+i
+1) == 'M') &&
1333 (*(cptr
+i
+2) == 'P') &&
1334 (*(cptr
+i
+3) == '_'))
1335 /*LINTED: pointer cast may result in improper alignment */
1336 return ((struct apic_mpfps_hdr
*)(cptr
+ i
));
1342 apic_checksum(caddr_t bptr
, int len
)
1348 for (i
= 0; i
< len
; i
++)
1350 return ((int)cksum
);
1354 * On machines with PCI-PCI bridges, a device behind a PCI-PCI bridge
1355 * needs special handling. We may need to chase up the device tree,
1356 * using the PCI-PCI Bridge specification's "rotating IPIN assumptions",
1357 * to find the IPIN at the root bus that relates to the IPIN on the
1358 * subsidiary bus (for ACPI or MP). We may, however, have an entry
1359 * in the MP table or the ACPI namespace for this device itself.
1360 * We handle both cases in the search below.
1362 /* this is the non-acpi version */
1364 apic_handle_pci_pci_bridge(dev_info_t
*idip
, int child_devno
, int child_ipin
,
1365 struct apic_io_intr
**intrp
)
1367 dev_info_t
*dipp
, *dip
;
1369 ddi_acc_handle_t cfg_handle
;
1370 int bridge_devno
, bridge_bus
;
1377 if (((dipp
= ddi_get_parent(dip
)) == (dev_info_t
*)NULL
) ||
1378 (pci_config_setup(dipp
, &cfg_handle
) != DDI_SUCCESS
))
1380 if ((pci_config_get8(cfg_handle
, PCI_CONF_BASCLASS
) ==
1381 PCI_CLASS_BRIDGE
) && (pci_config_get8(cfg_handle
,
1382 PCI_CONF_SUBCLASS
) == PCI_BRIDGE_PCI
)) {
1383 pci_config_teardown(&cfg_handle
);
1384 if (acpica_get_bdf(dipp
, &bridge_bus
, &bridge_devno
,
1388 * This is the rotating scheme documented in the
1389 * PCI-to-PCI spec. If the PCI-to-PCI bridge is
1390 * behind another PCI-to-PCI bridge, then it needs
1391 * to keep ascending until an interrupt entry is
1392 * found or the root is reached.
1394 ipin
= (child_devno
+ child_ipin
) % PCI_INTD
;
1395 if (bridge_bus
== 0 && apic_pci_bus_total
== 1)
1396 bridge_bus
= (int)apic_single_pci_busid
;
1397 pci_irq
= ((bridge_devno
& 0x1f) << 2) |
1399 if ((*intrp
= apic_find_io_intr_w_busid(pci_irq
,
1400 bridge_bus
)) != NULL
) {
1404 child_devno
= bridge_devno
;
1407 pci_config_teardown(&cfg_handle
);
1411 /*LINTED: function will not fall off the bottom */
1415 acpi_find_ioapic(int irq
)
1419 for (i
= 0; i
< apic_io_max
; i
++) {
1420 if (irq
>= apic_io_vectbase
[i
] && irq
<= apic_io_vectend
[i
])
1421 return ((uchar_t
)i
);
1423 return (0xFF); /* shouldn't happen */
1427 * See if two irqs are compatible for sharing a vector.
1428 * Currently we only support sharing of PCI devices.
1431 acpi_intr_compatible(iflag_t iflag1
, iflag_t iflag2
)
1436 /* Assume active high by default */
1440 if (iflag1
.bustype
!= iflag2
.bustype
|| iflag1
.bustype
!= BUS_PCI
)
1443 if (iflag1
.intr_el
== INTR_EL_CONFORM
)
1446 level1
= (iflag1
.intr_el
== INTR_EL_LEVEL
) ? AV_LEVEL
: 0;
1448 if (level1
&& ((iflag1
.intr_po
== INTR_PO_ACTIVE_LOW
) ||
1449 (iflag1
.intr_po
== INTR_PO_CONFORM
)))
1450 po1
= AV_ACTIVE_LOW
;
1452 if (iflag2
.intr_el
== INTR_EL_CONFORM
)
1455 level2
= (iflag2
.intr_el
== INTR_EL_LEVEL
) ? AV_LEVEL
: 0;
1457 if (level2
&& ((iflag2
.intr_po
== INTR_PO_ACTIVE_LOW
) ||
1458 (iflag2
.intr_po
== INTR_PO_CONFORM
)))
1459 po2
= AV_ACTIVE_LOW
;
1461 if ((level1
== level2
) && (po1
== po2
))
1467 struct apic_io_intr
*
1468 apic_find_io_intr_w_busid(int irqno
, int busid
)
1470 struct apic_io_intr
*intrp
;
1473 * It can have more than 1 entry with same source bus IRQ,
1474 * but unique with the source bus id
1476 intrp
= apic_io_intrp
;
1477 if (intrp
!= NULL
) {
1478 while (intrp
->intr_entry
== APIC_IO_INTR_ENTRY
) {
1479 if (intrp
->intr_irq
== irqno
&&
1480 intrp
->intr_busid
== busid
&&
1481 intrp
->intr_type
== IO_INTR_INT
)
1486 APIC_VERBOSE_IOAPIC((CE_NOTE
, "Did not find io intr for irqno:"
1487 "busid %x:%x\n", irqno
, busid
));
1492 struct mps_bus_info
{
1495 } bus_info_array
[] = {
1499 "XPRESS", BUS_XPRESS
,
1500 "PCMCIA", BUS_PCMCIA
,
1503 "CBUSII", BUS_CBUSII
,
1504 "FUTURE", BUS_FUTURE
,
1505 "INTERN", BUS_INTERN
,
1510 "NUBUS ", BUS_NUBUS
,
1517 apic_find_bus_type(char *bus
)
1521 for (; i
< sizeof (bus_info_array
)/sizeof (struct mps_bus_info
); i
++)
1522 if (strncmp(bus
, bus_info_array
[i
].bus_name
,
1523 strlen(bus_info_array
[i
].bus_name
)) == 0)
1524 return (bus_info_array
[i
].bus_id
);
1525 APIC_VERBOSE_IOAPIC((CE_WARN
, "Did not find bus type for bus %s", bus
));
1530 apic_find_bus(int busid
)
1532 struct apic_bus
*busp
;
1535 while (busp
->bus_entry
== APIC_BUS_ENTRY
) {
1536 if (busp
->bus_id
== busid
)
1537 return (apic_find_bus_type((char *)&busp
->bus_str1
));
1540 APIC_VERBOSE_IOAPIC((CE_WARN
, "Did not find bus for bus id %x", busid
));
1545 apic_find_bus_id(int bustype
)
1547 struct apic_bus
*busp
;
1550 while (busp
->bus_entry
== APIC_BUS_ENTRY
) {
1551 if (apic_find_bus_type((char *)&busp
->bus_str1
) == bustype
)
1552 return (busp
->bus_id
);
1555 APIC_VERBOSE_IOAPIC((CE_WARN
, "Did not find bus id for bustype %x",
1561 * Check if a particular irq need to be reserved for any io_intr
1563 static struct apic_io_intr
*
1564 apic_find_io_intr(int irqno
)
1566 struct apic_io_intr
*intrp
;
1568 intrp
= apic_io_intrp
;
1569 if (intrp
!= NULL
) {
1570 while (intrp
->intr_entry
== APIC_IO_INTR_ENTRY
) {
1571 if (intrp
->intr_irq
== irqno
&&
1572 intrp
->intr_type
== IO_INTR_INT
)
1581 * Check if the given ioapicindex intin combination has already been assigned
1582 * an irq. If so return irqno. Else -1
1585 apic_find_intin(uchar_t ioapic
, uchar_t intin
)
1590 /* find ioapic and intin in the apic_irq_table[] and return the index */
1591 for (i
= apic_min_device_irq
; i
<= apic_max_device_irq
; i
++) {
1592 irqptr
= apic_irq_table
[i
];
1594 if ((irqptr
->airq_mps_intr_index
>= 0) &&
1595 (irqptr
->airq_intin_no
== intin
) &&
1596 (irqptr
->airq_ioapicindex
== ioapic
)) {
1597 APIC_VERBOSE_IOAPIC((CE_NOTE
, "!Found irq "
1598 "entry for ioapic:intin %x:%x "
1599 "shared interrupts ?", ioapic
, intin
));
1602 irqptr
= irqptr
->airq_next
;
1609 apic_allocate_irq(int irq
)
1613 if ((freeirq
= apic_find_free_irq(irq
, (APIC_RESV_IRQ
- 1))) == -1)
1614 if ((freeirq
= apic_find_free_irq(APIC_FIRST_FREE_IRQ
,
1615 (irq
- 1))) == -1) {
1617 * if BIOS really defines every single irq in the mps
1618 * table, then don't worry about conflicting with
1619 * them, just use any free slot in apic_irq_table
1621 for (i
= APIC_FIRST_FREE_IRQ
; i
< APIC_RESV_IRQ
; i
++) {
1622 if ((apic_irq_table
[i
] == NULL
) ||
1623 apic_irq_table
[i
]->airq_mps_intr_index
==
1629 if (freeirq
== -1) {
1630 /* This shouldn't happen, but just in case */
1631 cmn_err(CE_WARN
, "%s: NO available IRQ", psm_name
);
1635 if (apic_irq_table
[freeirq
] == NULL
) {
1636 apic_irq_table
[freeirq
] =
1637 kmem_zalloc(sizeof (apic_irq_t
), KM_NOSLEEP
);
1638 if (apic_irq_table
[freeirq
] == NULL
) {
1639 cmn_err(CE_WARN
, "%s: NO memory to allocate IRQ",
1643 apic_irq_table
[freeirq
]->airq_temp_cpu
= IRQ_UNINIT
;
1644 apic_irq_table
[freeirq
]->airq_mps_intr_index
= FREE_INDEX
;
1650 apic_find_free_irq(int start
, int end
)
1654 for (i
= start
; i
<= end
; i
++)
1655 /* Check if any I/O entry needs this IRQ */
1656 if (apic_find_io_intr(i
) == NULL
) {
1657 /* Then see if it is free */
1658 if ((apic_irq_table
[i
] == NULL
) ||
1659 (apic_irq_table
[i
]->airq_mps_intr_index
==
1668 * compute the polarity, trigger mode and vector for programming into
1669 * the I/O apic and record in airq_rdt_entry.
1672 apic_record_rdt_entry(apic_irq_t
*irqptr
, int irq
)
1674 int ioapicindex
, bus_type
, vector
;
1676 uint_t level
, po
, io_po
;
1677 struct apic_io_intr
*iointrp
;
1679 intr_index
= irqptr
->airq_mps_intr_index
;
1680 DDI_INTR_IMPLDBG((CE_CONT
, "apic_record_rdt_entry: intr_index=%d "
1681 "irq = 0x%x dip = 0x%p vector = 0x%x\n", intr_index
, irq
,
1682 (void *)irqptr
->airq_dip
, irqptr
->airq_vector
));
1684 if (intr_index
== RESERVE_INDEX
) {
1685 apic_error
|= APIC_ERR_INVALID_INDEX
;
1687 } else if (APIC_IS_MSI_OR_MSIX_INDEX(intr_index
)) {
1691 vector
= irqptr
->airq_vector
;
1692 ioapicindex
= irqptr
->airq_ioapicindex
;
1693 /* Assume edge triggered by default */
1695 /* Assume active high by default */
1698 if (intr_index
== DEFAULT_INDEX
|| intr_index
== FREE_INDEX
) {
1700 if (eisa_level_intr_mask
& (1 << irq
))
1702 if (intr_index
== FREE_INDEX
&& apic_defconf
== 0)
1703 apic_error
|= APIC_ERR_INVALID_INDEX
;
1704 } else if (intr_index
== ACPI_INDEX
) {
1705 bus_type
= irqptr
->airq_iflag
.bustype
;
1706 if (irqptr
->airq_iflag
.intr_el
== INTR_EL_CONFORM
) {
1707 if (bus_type
== BUS_PCI
)
1710 level
= (irqptr
->airq_iflag
.intr_el
== INTR_EL_LEVEL
) ?
1713 ((irqptr
->airq_iflag
.intr_po
== INTR_PO_ACTIVE_LOW
) ||
1714 (irqptr
->airq_iflag
.intr_po
== INTR_PO_CONFORM
&&
1715 bus_type
== BUS_PCI
)))
1718 iointrp
= apic_io_intrp
+ intr_index
;
1719 bus_type
= apic_find_bus(iointrp
->intr_busid
);
1720 if (iointrp
->intr_el
== INTR_EL_CONFORM
) {
1721 if ((irq
< 16) && (eisa_level_intr_mask
& (1 << irq
)))
1723 else if (bus_type
== BUS_PCI
)
1726 level
= (iointrp
->intr_el
== INTR_EL_LEVEL
) ?
1728 if (level
&& ((iointrp
->intr_po
== INTR_PO_ACTIVE_LOW
) ||
1729 (iointrp
->intr_po
== INTR_PO_CONFORM
&&
1730 bus_type
== BUS_PCI
)))
1734 apic_level_intr
[irq
] = 1;
1736 * The 82489DX External APIC cannot do active low polarity interrupts.
1738 if (po
&& (apic_io_ver
[ioapicindex
] != IOAPIC_VER_82489DX
))
1743 if (apic_verbose
& APIC_VERBOSE_IOAPIC_FLAG
)
1744 prom_printf("setio: ioapic=0x%x intin=0x%x level=0x%x po=0x%x "
1745 "vector=0x%x cpu=0x%x\n\n", ioapicindex
,
1746 irqptr
->airq_intin_no
, level
, io_po
, vector
,
1749 irqptr
->airq_rdt_entry
= level
|io_po
|vector
;
1753 apic_acpi_translate_pci_irq(dev_info_t
*dip
, int busid
, int devid
,
1754 int ipin
, int *pci_irqp
, iflag_t
*intr_flagp
)
1758 acpi_psm_lnk_t acpipsmlnk
;
1760 if ((status
= acpi_get_irq_cache_ent(busid
, devid
, ipin
, pci_irqp
,
1761 intr_flagp
)) == ACPI_PSM_SUCCESS
) {
1762 APIC_VERBOSE_IRQ((CE_CONT
, "!%s: Found irqno %d "
1763 "from cache for device %s, instance #%d\n", psm_name
,
1764 *pci_irqp
, ddi_get_name(dip
), ddi_get_instance(dip
)));
1768 bzero(&acpipsmlnk
, sizeof (acpi_psm_lnk_t
));
1770 if ((status
= acpi_translate_pci_irq(dip
, ipin
, pci_irqp
, intr_flagp
,
1771 &acpipsmlnk
)) == ACPI_PSM_FAILURE
) {
1772 APIC_VERBOSE_IRQ((CE_WARN
, "%s: "
1773 " acpi_translate_pci_irq failed for device %s, instance"
1774 " #%d", psm_name
, ddi_get_name(dip
),
1775 ddi_get_instance(dip
)));
1779 if (status
== ACPI_PSM_PARTIAL
&& acpipsmlnk
.lnkobj
!= NULL
) {
1780 status
= apic_acpi_irq_configure(&acpipsmlnk
, dip
, pci_irqp
,
1782 if (status
!= ACPI_PSM_SUCCESS
) {
1783 status
= acpi_get_current_irq_resource(&acpipsmlnk
,
1784 pci_irqp
, intr_flagp
);
1788 if (status
== ACPI_PSM_SUCCESS
) {
1789 acpi_new_irq_cache_ent(busid
, devid
, ipin
, *pci_irqp
,
1790 intr_flagp
, &acpipsmlnk
);
1792 APIC_VERBOSE_IRQ((CE_CONT
, "%s: [ACPI] "
1793 "new irq %d for device %s, instance #%d\n", psm_name
,
1794 *pci_irqp
, ddi_get_name(dip
), ddi_get_instance(dip
)));
1801 * Adds an entry to the irq list passed in, and returns the new list.
1802 * Entries are added in priority order (lower numerical priorities are
1803 * placed closer to the head of the list)
1805 static prs_irq_list_t
*
1806 acpi_insert_prs_irq_ent(prs_irq_list_t
*listp
, int priority
, int irq
,
1807 iflag_t
*iflagp
, acpi_prs_private_t
*prsprvp
)
1809 struct prs_irq_list_ent
*newent
, *prevp
= NULL
, *origlistp
;
1811 newent
= kmem_zalloc(sizeof (struct prs_irq_list_ent
), KM_SLEEP
);
1813 newent
->list_prio
= priority
;
1815 newent
->intrflags
= *iflagp
;
1816 newent
->prsprv
= *prsprvp
;
1817 /* ->next is NULL from kmem_zalloc */
1820 * New list -- return the new entry as the list.
1826 * Save original list pointer for return (since we're not modifying
1832 * Insertion sort, with entries with identical keys stored AFTER
1833 * existing entries (the less-than-or-equal test of priority does
1836 while (listp
!= NULL
&& listp
->list_prio
<= priority
) {
1838 listp
= listp
->next
;
1841 newent
->next
= listp
;
1843 if (prevp
== NULL
) { /* Add at head of list (newent is the new head) */
1846 prevp
->next
= newent
;
1852 * Frees the list passed in, deallocating all memory and leaving *listpp
1856 acpi_destroy_prs_irq_list(prs_irq_list_t
**listpp
)
1858 struct prs_irq_list_ent
*nextp
;
1860 ASSERT(listpp
!= NULL
);
1862 while (*listpp
!= NULL
) {
1863 nextp
= (*listpp
)->next
;
1864 kmem_free(*listpp
, sizeof (struct prs_irq_list_ent
));
1870 * apic_choose_irqs_from_prs returns a list of irqs selected from the list of
1871 * irqs returned by the link device's _PRS method. The irqs are chosen
1872 * to minimize contention in situations where the interrupt link device
1873 * can be programmed to steer interrupts to different interrupt controller
1874 * inputs (some of which may already be in use). The list is sorted in order
1875 * of irqs to use, with the highest priority given to interrupt controller
1876 * inputs that are not shared. When an interrupt controller input
1877 * must be shared, apic_choose_irqs_from_prs adds the possible irqs to the
1878 * returned list in the order that minimizes sharing (thereby ensuring lowest
1879 * possible latency from interrupt trigger time to ISR execution time).
1881 static prs_irq_list_t
*
1882 apic_choose_irqs_from_prs(acpi_irqlist_t
*irqlistent
, dev_info_t
*dip
,
1887 prs_irq_list_t
*prsirqlistp
= NULL
;
1890 while (irqlistent
!= NULL
) {
1891 irqlistent
->intr_flags
.bustype
= BUS_PCI
;
1893 for (i
= 0; i
< irqlistent
->num_irqs
; i
++) {
1895 irq
= irqlistent
->irqs
[i
];
1898 /* invalid irq number */
1902 if ((irq
< 16) && (apic_reserved_irqlist
[irq
]))
1905 if ((apic_irq_table
[irq
] == NULL
) ||
1906 (apic_irq_table
[irq
]->airq_dip
== dip
)) {
1908 prsirqlistp
= acpi_insert_prs_irq_ent(
1909 prsirqlistp
, 0 /* Highest priority */, irq
,
1910 &irqlistent
->intr_flags
,
1911 &irqlistent
->acpi_prs_prv
);
1914 * If we do not prefer the current irq from _CRS
1915 * or if we do and this irq is the same as the
1916 * current irq from _CRS, this is the one
1919 if (!(apic_prefer_crs
) || (irq
== crs_irq
)) {
1920 return (prsirqlistp
);
1926 * Edge-triggered interrupts cannot be shared
1928 if (irqlistent
->intr_flags
.intr_el
== INTR_EL_EDGE
)
1932 * To work around BIOSes that contain incorrect
1933 * interrupt polarity information in interrupt
1934 * descriptors returned by _PRS, we assume that
1935 * the polarity of the other device sharing this
1936 * interrupt controller input is compatible.
1937 * If it's not, the caller will catch it when
1938 * the caller invokes the link device's _CRS method
1939 * (after invoking its _SRS method).
1941 iflags
= irqlistent
->intr_flags
;
1943 apic_irq_table
[irq
]->airq_iflag
.intr_po
;
1945 if (!acpi_intr_compatible(iflags
,
1946 apic_irq_table
[irq
]->airq_iflag
)) {
1947 APIC_VERBOSE_IRQ((CE_CONT
, "!%s: irq %d "
1948 "not compatible [%x:%x:%x !~ %x:%x:%x]",
1953 apic_irq_table
[irq
]->airq_iflag
.intr_po
,
1954 apic_irq_table
[irq
]->airq_iflag
.intr_el
,
1955 apic_irq_table
[irq
]->airq_iflag
.bustype
));
1960 * If we prefer the irq from _CRS, no need
1961 * to search any further (and make sure
1962 * to add this irq with the highest priority
1963 * so it's tried first).
1965 if (crs_irq
== irq
&& apic_prefer_crs
) {
1967 return (acpi_insert_prs_irq_ent(
1969 0 /* Highest priority */,
1971 &irqlistent
->acpi_prs_prv
));
1975 * Priority is equal to the share count (lower
1976 * share count is higher priority). Note that
1977 * the intr flags passed in here are the ones we
1978 * changed above -- if incorrect, it will be
1979 * caught by the caller's _CRS flags comparison.
1981 prsirqlistp
= acpi_insert_prs_irq_ent(
1983 apic_irq_table
[irq
]->airq_share
, irq
,
1984 &iflags
, &irqlistent
->acpi_prs_prv
);
1987 /* Go to the next irqlist entry */
1988 irqlistent
= irqlistent
->next
;
1991 return (prsirqlistp
);
1995 * Configures the irq for the interrupt link device identified by
1998 * Gets the current and the list of possible irq settings for the
1999 * device. If apic_unconditional_srs is not set, and the current
2000 * resource setting is in the list of possible irq settings,
2001 * current irq resource setting is passed to the caller.
2003 * Otherwise, picks an irq number from the list of possible irq
2004 * settings, and sets the irq of the device to this value.
2005 * If prefer_crs is set, among a set of irq numbers in the list that have
2006 * the least number of devices sharing the interrupt, we pick current irq
2007 * resource setting if it is a member of this set.
2009 * Passes the irq number in the value pointed to by pci_irqp, and
2010 * polarity and sensitivity in the structure pointed to by dipintrflagp
2013 * Note that if setting the irq resource failed, but successfuly obtained
2014 * the current irq resource settings, passes the current irq resources
2015 * and considers it a success.
2018 * ACPI_PSM_SUCCESS on success.
2020 * ACPI_PSM_FAILURE if an error occured during the configuration or
2021 * if a suitable irq was not found for this device, or if setting the
2022 * irq resource and obtaining the current resource fails.
2026 apic_acpi_irq_configure(acpi_psm_lnk_t
*acpipsmlnkp
, dev_info_t
*dip
,
2027 int *pci_irqp
, iflag_t
*dipintr_flagp
)
2031 acpi_irqlist_t
*irqlistp
;
2032 prs_irq_list_t
*prs_irq_listp
, *prs_irq_entp
;
2033 boolean_t found_irq
= B_FALSE
;
2035 dipintr_flagp
->bustype
= BUS_PCI
;
2037 if ((acpi_get_possible_irq_resources(acpipsmlnkp
, &irqlistp
))
2038 == ACPI_PSM_FAILURE
) {
2039 APIC_VERBOSE_IRQ((CE_WARN
, "!%s: Unable to determine "
2040 "or assign IRQ for device %s, instance #%d: The system was "
2041 "unable to get the list of potential IRQs from ACPI.",
2042 psm_name
, ddi_get_name(dip
), ddi_get_instance(dip
)));
2044 return (ACPI_PSM_FAILURE
);
2047 if ((acpi_get_current_irq_resource(acpipsmlnkp
, &cur_irq
,
2048 dipintr_flagp
) == ACPI_PSM_SUCCESS
) && (!apic_unconditional_srs
) &&
2051 * If an IRQ is set in CRS and that IRQ exists in the set
2052 * returned from _PRS, return that IRQ, otherwise print
2056 if (acpi_irqlist_find_irq(irqlistp
, cur_irq
, NULL
)
2057 == ACPI_PSM_SUCCESS
) {
2059 ASSERT(pci_irqp
!= NULL
);
2060 *pci_irqp
= cur_irq
;
2061 acpi_free_irqlist(irqlistp
);
2062 return (ACPI_PSM_SUCCESS
);
2065 APIC_VERBOSE_IRQ((CE_WARN
, "!%s: Could not find the "
2066 "current irq %d for device %s, instance #%d in ACPI's "
2067 "list of possible irqs for this device. Picking one from "
2068 " the latter list.", psm_name
, cur_irq
, ddi_get_name(dip
),
2069 ddi_get_instance(dip
)));
2072 if ((prs_irq_listp
= apic_choose_irqs_from_prs(irqlistp
, dip
,
2073 cur_irq
)) == NULL
) {
2075 APIC_VERBOSE_IRQ((CE_WARN
, "!%s: Could not find a "
2076 "suitable irq from the list of possible irqs for device "
2077 "%s, instance #%d in ACPI's list of possible irqs",
2078 psm_name
, ddi_get_name(dip
), ddi_get_instance(dip
)));
2080 acpi_free_irqlist(irqlistp
);
2081 return (ACPI_PSM_FAILURE
);
2084 acpi_free_irqlist(irqlistp
);
2086 for (prs_irq_entp
= prs_irq_listp
;
2087 prs_irq_entp
!= NULL
&& found_irq
== B_FALSE
;
2088 prs_irq_entp
= prs_irq_entp
->next
) {
2090 acpipsmlnkp
->acpi_prs_prv
= prs_irq_entp
->prsprv
;
2091 irq
= prs_irq_entp
->irq
;
2093 APIC_VERBOSE_IRQ((CE_CONT
, "!%s: Setting irq %d for "
2094 "device %s instance #%d\n", psm_name
, irq
,
2095 ddi_get_name(dip
), ddi_get_instance(dip
)));
2097 if ((acpi_set_irq_resource(acpipsmlnkp
, irq
))
2098 == ACPI_PSM_SUCCESS
) {
2100 * setting irq was successful, check to make sure CRS
2101 * reflects that. If CRS does not agree with what we
2102 * set, return the irq that was set.
2105 if (acpi_get_current_irq_resource(acpipsmlnkp
, &cur_irq
,
2106 dipintr_flagp
) == ACPI_PSM_SUCCESS
) {
2109 APIC_VERBOSE_IRQ((CE_WARN
,
2110 "!%s: IRQ resource set "
2111 "(irqno %d) for device %s "
2112 "instance #%d, differs from "
2113 "current setting irqno %d",
2114 psm_name
, irq
, ddi_get_name(dip
),
2115 ddi_get_instance(dip
), cur_irq
));
2118 * On at least one system, there was a bug in
2119 * a DSDT method called by _STA, causing _STA to
2120 * indicate that the link device was disabled
2121 * (when, in fact, it was enabled). Since _SRS
2122 * succeeded, assume that _CRS is lying and use
2123 * the iflags from this _PRS interrupt choice.
2124 * If we're wrong about the flags, the polarity
2125 * will be incorrect and we may get an interrupt
2126 * storm, but there's not much else we can do
2129 *dipintr_flagp
= prs_irq_entp
->intrflags
;
2133 * Return the irq that was set, and not what _CRS
2134 * reports, since _CRS has been seen to return
2135 * different IRQs than what was passed to _SRS on some
2136 * systems (and just not return successfully on others).
2141 APIC_VERBOSE_IRQ((CE_WARN
, "!%s: set resource "
2142 "irq %d failed for device %s instance #%d",
2143 psm_name
, irq
, ddi_get_name(dip
),
2144 ddi_get_instance(dip
)));
2146 if (cur_irq
== -1) {
2147 acpi_destroy_prs_irq_list(&prs_irq_listp
);
2148 return (ACPI_PSM_FAILURE
);
2153 acpi_destroy_prs_irq_list(&prs_irq_listp
);
2156 return (ACPI_PSM_FAILURE
);
2158 ASSERT(pci_irqp
!= NULL
);
2159 *pci_irqp
= cur_irq
;
2160 return (ACPI_PSM_SUCCESS
);
void
ioapic_disable_redirection()
{
	int ioapic_ix;
	int intin_max;
	int intin_ix;

	/* Disable the I/O APIC redirection entries */
	for (ioapic_ix = 0; ioapic_ix < apic_io_max; ioapic_ix++) {

		/* Bits 23-16 define the maximum redirection entries */
		intin_max = (ioapic_read(ioapic_ix, APIC_VERS_CMD) >> 16)
		    & 0xff;

		for (intin_ix = 0; intin_ix <= intin_max; intin_ix++) {
			/*
			 * The assumption here is that this is safe, even for
			 * systems with IOAPICs that suffer from the hardware
			 * erratum because all devices have been quiesced before
			 * this function is called from apic_shutdown()
			 * (or equivalent). If that assumption turns out to be
			 * false, this mask operation can induce the same
			 * erratum result we're trying to avoid.
			 */
			ioapic_write(ioapic_ix, APIC_RDT_CMD + 2 * intin_ix,
			    AV_MASK);
		}
	}
}
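
/*
 * AMD 8131/8132 I/O APICs are the parts affected by the masking erratum
 * referenced above; the helper below identifies them by vendor/device ID
 * so that callers can decide whether the masking workaround is needed.
 */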
/*
 * Looks for an IOAPIC with the specified physical address in the /ioapics
 * node in the device tree (created by the PCI enumerator).
 */
static boolean_t
apic_is_ioapic_AMD_813x(uint32_t physaddr)
{
	/*
	 * Look in /ioapics, for the ioapic with
	 * the physical address given
	 */
	dev_info_t *ioapicsnode = ddi_find_devinfo(IOAPICS_NODE_NAME, -1, 0);
	dev_info_t *ioapic_child;
	boolean_t rv = B_FALSE;
	int vid, did;
	uint64_t ioapic_paddr;
	boolean_t done = B_FALSE;

	if (ioapicsnode == NULL)
		return (B_FALSE);

	/* Load first child: */
	ioapic_child = ddi_get_child(ioapicsnode);
	while (!done && ioapic_child != 0) { /* Iterate over children */

		if ((ioapic_paddr = (uint64_t)ddi_prop_get_int64(DDI_DEV_T_ANY,
		    ioapic_child, DDI_PROP_DONTPASS, "reg", 0))
		    != 0 && physaddr == ioapic_paddr) {

			vid = ddi_prop_get_int(DDI_DEV_T_ANY, ioapic_child,
			    DDI_PROP_DONTPASS, IOAPICS_PROP_VENID, 0);

			if (vid == VENID_AMD) {

				did = ddi_prop_get_int(DDI_DEV_T_ANY,
				    ioapic_child, DDI_PROP_DONTPASS,
				    IOAPICS_PROP_DEVID, 0);

				if (did == DEVID_8131_IOAPIC ||
				    did == DEVID_8132_IOAPIC) {

					rv = B_TRUE;
					done = B_TRUE;
				}
			}
		}

		ioapic_child = ddi_get_next_sibling(ioapic_child);
	}

	/* The ioapics node was held by ddi_find_devinfo, so release it */
	ndi_rele_devi(ioapicsnode);

	return (rv);
}
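
/*
 * Saved APIC context for suspend/resume: the local APIC registers that
 * must survive a suspend, plus the I/O APIC IDs captured on the boot CPU.
 */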
struct apic_state {
	int32_t		as_task_reg;
	int32_t		as_dest_reg;
	int32_t		as_format_reg;
	int32_t		as_local_timer;
	int32_t		as_pcint_vect;
	int32_t		as_int_vect0;
	int32_t		as_int_vect1;
	int32_t		as_err_vect;
	int32_t		as_init_count;
	int32_t		as_divide_reg;
	int32_t		as_spur_int_reg;
	uint32_t	as_ioapic_ids[MAX_IO_APIC];
};
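
/*
 * Evaluate the ACPI \_PIC method to tell the platform firmware that the
 * OS will use the APICs in symmetric I/O mode rather than 8259 PIC mode.
 */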
static int
apic_acpi_enter_apicmode(void)
{
	ACPI_OBJECT_LIST	arglist;
	ACPI_OBJECT		arg;
	ACPI_STATUS		status;

	/* Setup parameter object */
	arglist.Count = 1;
	arglist.Pointer = &arg;
	arg.Type = ACPI_TYPE_INTEGER;
	arg.Integer.Value = ACPI_APIC_MODE;

	status = AcpiEvaluateObject(NULL, "\\_PIC", &arglist, NULL);
	/*
	 * Per ACPI spec - section 5.8.1 _PIC Method
	 * calling the \_PIC control method is optional for the OS
	 * and might not be found. It's ok to not fail in such cases.
	 * This is the case on linux KVM and qemu (status AE_NOT_FOUND)
	 */
	if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
		cmn_err(CE_NOTE,
		    "!apic: Reporting APIC mode failed (via _PIC), err: 0x%x",
		    ACPI_FAILURE(status));
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}
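
/*
 * Capture the local APIC state in *sp prior to suspend; on the boot
 * processor the I/O APIC IDs are saved as well.
 */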
static void
apic_save_state(struct apic_state *sp)
{
	int	i, cpuid;
	ulong_t	iflag;

	PMD(PMD_SX, ("apic_save_state %p\n", (void *)sp))
	/*
	 * First the local APIC.
	 */
	sp->as_task_reg = apic_reg_ops->apic_get_pri();
	sp->as_dest_reg = apic_reg_ops->apic_read(APIC_DEST_REG);
	if (apic_mode == LOCAL_APIC)
		sp->as_format_reg = apic_reg_ops->apic_read(APIC_FORMAT_REG);
	sp->as_local_timer = apic_reg_ops->apic_read(APIC_LOCAL_TIMER);
	sp->as_pcint_vect = apic_reg_ops->apic_read(APIC_PCINT_VECT);
	sp->as_int_vect0 = apic_reg_ops->apic_read(APIC_INT_VECT0);
	sp->as_int_vect1 = apic_reg_ops->apic_read(APIC_INT_VECT1);
	sp->as_err_vect = apic_reg_ops->apic_read(APIC_ERR_VECT);
	sp->as_init_count = apic_reg_ops->apic_read(APIC_INIT_COUNT);
	sp->as_divide_reg = apic_reg_ops->apic_read(APIC_DIVIDE_REG);
	sp->as_spur_int_reg = apic_reg_ops->apic_read(APIC_SPUR_INT_REG);

	/*
	 * If on the boot processor then save the IOAPICs' IDs
	 */
	if ((cpuid = psm_get_cpu_id()) == 0) {

		iflag = intr_clear();
		lock_set(&apic_ioapic_lock);

		for (i = 0; i < apic_io_max; i++)
			sp->as_ioapic_ids[i] = ioapic_read(i, APIC_ID_CMD);

		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
	}

	/* apic_state() is currently invoked only in Suspend/Resume */
	apic_cpus[cpuid].aci_status |= APIC_CPU_SUSPEND;
}
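
/*
 * Inverse of apic_save_state(): reprogram the local APIC from *sp on
 * resume, and on the boot processor also restore the I/O APIC IDs,
 * re-enter ACPI APIC mode and restore the ACPI link device mappings.
 */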
static void
apic_restore_state(struct apic_state *sp)
{
	int	i;
	ulong_t	iflag;

	/*
	 * First the local APIC.
	 */
	apic_reg_ops->apic_write_task_reg(sp->as_task_reg);
	if (apic_mode == LOCAL_APIC) {
		apic_reg_ops->apic_write(APIC_DEST_REG, sp->as_dest_reg);
		apic_reg_ops->apic_write(APIC_FORMAT_REG, sp->as_format_reg);
	}
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, sp->as_local_timer);
	apic_reg_ops->apic_write(APIC_PCINT_VECT, sp->as_pcint_vect);
	apic_reg_ops->apic_write(APIC_INT_VECT0, sp->as_int_vect0);
	apic_reg_ops->apic_write(APIC_INT_VECT1, sp->as_int_vect1);
	apic_reg_ops->apic_write(APIC_ERR_VECT, sp->as_err_vect);
	apic_reg_ops->apic_write(APIC_INIT_COUNT, sp->as_init_count);
	apic_reg_ops->apic_write(APIC_DIVIDE_REG, sp->as_divide_reg);
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, sp->as_spur_int_reg);

	/*
	 * the following only needs to be done once, so we do it on the
	 * boot processor, since we know that we only have one of those
	 */
	if (psm_get_cpu_id() == 0) {

		iflag = intr_clear();
		lock_set(&apic_ioapic_lock);

		/* Restore IOAPICs' APIC IDs */
		for (i = 0; i < apic_io_max; i++) {
			ioapic_write(i, APIC_ID_CMD, sp->as_ioapic_ids[i]);
		}

		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);

		/*
		 * Reenter APIC mode before restoring LNK devices
		 */
		(void) apic_acpi_enter_apicmode();

		/*
		 * restore acpi link device mappings
		 */
		acpi_restore_link_devices();
	}
}
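
/*
 * PSM state handler: services the PSM_STATE_{ALLOC,FREE,SAVE,RESTORE}
 * requests issued around suspend/resume, using the apic_state buffer
 * and the save/restore routines above.
 */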
/*
 * Returns 0 on success
 */
int
apic_state(psm_state_request_t *rp)
{
	PMD(PMD_SX, ("apic_state "))
	switch (rp->psr_cmd) {
	case PSM_STATE_ALLOC:
		rp->req.psm_state_req.psr_state =
		    kmem_zalloc(sizeof (struct apic_state), KM_NOSLEEP);
		if (rp->req.psm_state_req.psr_state == NULL)
			return (ENOMEM);
		rp->req.psm_state_req.psr_state_size =
		    sizeof (struct apic_state);
		PMD(PMD_SX, (":STATE_ALLOC: state %p, size %lx\n",
		    rp->req.psm_state_req.psr_state,
		    rp->req.psm_state_req.psr_state_size))
		return (0);

	case PSM_STATE_FREE:
		kmem_free(rp->req.psm_state_req.psr_state,
		    rp->req.psm_state_req.psr_state_size);
		PMD(PMD_SX, (" STATE_FREE: state %p, size %lx\n",
		    rp->req.psm_state_req.psr_state,
		    rp->req.psm_state_req.psr_state_size))
		return (0);

	case PSM_STATE_SAVE:
		PMD(PMD_SX, (" STATE_SAVE: state %p, size %lx\n",
		    rp->req.psm_state_req.psr_state,
		    rp->req.psm_state_req.psr_state_size))
		apic_save_state(rp->req.psm_state_req.psr_state);
		return (0);

	case PSM_STATE_RESTORE:
		apic_restore_state(rp->req.psm_state_req.psr_state);
		PMD(PMD_SX, (" STATE_RESTORE: state %p, size %lx\n",
		    rp->req.psm_state_req.psr_state,
		    rp->req.psm_state_req.psr_state_size))