4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
30 * Copyright (c) 2017, Joyent, Inc. All rights reserved.
34 * To understand how the pcplusmp module interacts with the interrupt subsystem
35 * read the theory statement in uts/i86pc/os/intr.c.
39 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
40 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
41 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
42 * PSMI 1.5 extensions are supported in Solaris Nevada.
43 * PSMI 1.6 extensions are supported in Solaris Nevada.
44 * PSMI 1.7 extensions are supported in Solaris Nevada.
48 #include <sys/processor.h>
51 #include <sys/smp_impldefs.h>
53 #include <sys/acpi/acpi.h>
54 #include <sys/acpica.h>
55 #include <sys/psm_common.h>
59 #include <sys/sunddi.h>
60 #include <sys/ddi_impldefs.h>
62 #include <sys/promif.h>
63 #include <sys/x86_archext.h>
64 #include <sys/cpc_impl.h>
65 #include <sys/uadmin.h>
66 #include <sys/panic.h>
67 #include <sys/debug.h>
68 #include <sys/archsystm.h>
70 #include <sys/machsystm.h>
71 #include <sys/sysmacros.h>
72 #include <sys/cpuvar.h>
73 #include <sys/rm_platter.h>
74 #include <sys/privregs.h>
76 #include <sys/pci_intr_lib.h>
78 #include <sys/clock.h>
79 #include <sys/cyclic.h>
80 #include <sys/dditypes.h>
81 #include <sys/sunddi.h>
82 #include <sys/x_call.h>
83 #include <sys/reboot.h>
85 #include <sys/apic_common.h>
86 #include <sys/apic_timer.h>
89 * Local Function Prototypes
91 static void apic_init_intr(void);
96 static int apic_probe(void);
97 static int apic_getclkirq(int ipl
);
98 static void apic_init(void);
99 static void apic_picinit(void);
100 static int apic_post_cpu_start(void);
101 static int apic_intr_enter(int ipl
, int *vect
);
102 static void apic_setspl(int ipl
);
103 static void x2apic_setspl(int ipl
);
104 static int apic_addspl(int ipl
, int vector
, int min_ipl
, int max_ipl
);
105 static int apic_delspl(int ipl
, int vector
, int min_ipl
, int max_ipl
);
106 static int apic_disable_intr(processorid_t cpun
);
107 static void apic_enable_intr(processorid_t cpun
);
108 static int apic_get_ipivect(int ipl
, int type
);
109 static void apic_post_cyclic_setup(void *arg
);
112 * The following vector assignments influence the value of ipltopri and
113 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
114 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
115 * we care to do so in future. Note some IPLs which are rarely used
116 * will share the vector ranges and heavily used IPLs (5 and 6) have
119 * This array is used to initialize apic_ipls[] (in apic_init()).
121 * IPL Vector range. as passed to intr_enter
123 * 1,2,3 0x20-0x2f 0x0-0xf
124 * 4 0x30-0x3f 0x10-0x1f
125 * 5 0x40-0x5f 0x20-0x3f
126 * 6 0x60-0x7f 0x40-0x5f
127 * 7,8,9 0x80-0x8f 0x60-0x6f
128 * 10 0x90-0x9f 0x70-0x7f
129 * 11 0xa0-0xaf 0x80-0x8f
131 * 15 0xe0-0xef 0xc0-0xcf
132 * 15 0xf0-0xff 0xd0-0xdf
134 uchar_t apic_vectortoipl
[APIC_AVAIL_VECTOR
/ APIC_VECTOR_PER_IPL
] = {
135 3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
138 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
139 * NOTE that this is vector as passed into intr_enter which is
140 * programmed vector - 0x20 (APIC_BASE_VECT)
143 uchar_t apic_ipltopri
[MAXIPL
+ 1]; /* unix ipl to apic pri */
144 /* The taskpri to be programmed into apic to mask given ipl */
147 * Correlation of the hardware vector to the IPL in use, initialized
148 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
149 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
150 * connected to errata-stricken IOAPICs
152 uchar_t apic_ipls
[APIC_AVAIL_VECTOR
];
155 * Patchable global variables.
157 int apic_enable_hwsoftint
= 0; /* 0 - disable, 1 - enable */
158 int apic_enable_bind_log
= 1; /* 1 - display interrupt binding log */
163 static struct psm_ops apic_ops
= {
175 (int (*)(int))NULL
, /* psm_softlvl_to_irq */
176 (void (*)(int))NULL
, /* psm_set_softintr */
183 (void (*)(void))NULL
, /* psm_hrtimeinit */
186 apic_get_next_processorid
,
193 (int (*)(dev_info_t
*, int))NULL
, /* psm_translate_irq */
194 (void (*)(int, char *))NULL
, /* psm_notify_error */
195 (void (*)(int))NULL
, /* psm_notify_func */
196 apic_timer_reprogram
,
199 apic_post_cyclic_setup
,
201 apic_intr_ops
, /* Advanced DDI Interrupt framework */
202 apic_state
, /* save, restore apic state for S3 */
203 apic_cpu_ops
, /* CPU control interface. */
206 struct psm_ops
*psmops
= &apic_ops
;
208 static struct psm_info apic_psm_info
= {
209 PSM_INFO_VER01_7
, /* version */
210 PSM_OWN_EXCLUSIVE
, /* ownership */
211 (struct psm_ops
*)&apic_ops
, /* operation */
212 APIC_PCPLUSMP_NAME
, /* machine name */
213 "pcplusmp v1.4 compatible",
216 static void *apic_hdlp
;
218 /* to gather intr data and redistribute */
219 static void apic_redistribute_compute(void);
222 * This is the loadable module wrapper
228 if (apic_coarse_hrtime
)
229 apic_ops
.psm_gethrtime
= &apic_gettime
;
230 return (psm_mod_init(&apic_hdlp
, &apic_psm_info
));
236 return (psm_mod_fini(&apic_hdlp
, &apic_psm_info
));
240 _info(struct modinfo
*modinfop
)
242 return (psm_mod_info(&apic_hdlp
, &apic_psm_info
, modinfop
));
248 /* check if apix is initialized */
249 if (apix_enable
&& apix_loaded())
250 return (PSM_FAILURE
);
253 * Check whether x2APIC mode was activated by BIOS. We don't support
254 * that in pcplusmp as apix normally handles that.
256 if (apic_local_mode() == LOCAL_X2APIC
)
257 return (PSM_FAILURE
);
259 /* continue using pcplusmp PSM */
262 return (apic_probe_common(apic_psm_info
.p_mach_idstring
));
266 apic_xlate_vector_by_irq(uchar_t irq
)
268 if (apic_irq_table
[irq
] == NULL
)
271 return (apic_irq_table
[irq
]->airq_vector
);
280 psm_get_ioapicid
= apic_get_ioapicid
;
281 psm_get_localapicid
= apic_get_localapicid
;
282 psm_xlate_vector_by_irq
= apic_xlate_vector_by_irq
;
284 apic_ipltopri
[0] = APIC_VECTOR_PER_IPL
; /* leave 0 for idle */
285 for (i
= 0; i
< (APIC_AVAIL_VECTOR
/ APIC_VECTOR_PER_IPL
); i
++) {
286 if ((i
< ((APIC_AVAIL_VECTOR
/ APIC_VECTOR_PER_IPL
) - 1)) &&
287 (apic_vectortoipl
[i
+ 1] == apic_vectortoipl
[i
]))
288 /* get to highest vector at the same ipl */
290 for (; j
<= apic_vectortoipl
[i
]; j
++) {
291 apic_ipltopri
[j
] = (i
<< APIC_IPL_SHIFT
) +
295 for (; j
< MAXIPL
+ 1; j
++)
296 /* fill up any empty ipltopri slots */
297 apic_ipltopri
[j
] = (i
<< APIC_IPL_SHIFT
) + APIC_BASE_VECT
;
300 #if !defined(__amd64)
301 if (cpuid_have_cr8access(CPU
))
302 apic_have_32bit_cr8
= 1;
309 processorid_t cpun
= psm_get_cpu_id();
311 uint32_t svr
= AV_UNIT_ENABLE
| APIC_SPUR_INTR
;
313 apic_reg_ops
->apic_write_task_reg(APIC_MASK_ALL
);
315 if (apic_mode
== LOCAL_APIC
) {
317 * We are running APIC in MMIO mode.
319 if (apic_flat_model
) {
320 apic_reg_ops
->apic_write(APIC_FORMAT_REG
,
323 apic_reg_ops
->apic_write(APIC_FORMAT_REG
,
327 apic_reg_ops
->apic_write(APIC_DEST_REG
,
328 AV_HIGH_ORDER
>> cpun
);
331 if (apic_directed_EOI_supported()) {
333 * Setting the 12th bit in the Spurious Interrupt Vector
334 * Register suppresses broadcast EOIs generated by the local
335 * APIC. The suppression of broadcast EOIs happens only when
336 * interrupts are level-triggered.
338 svr
|= APIC_SVR_SUPPRESS_BROADCAST_EOI
;
341 /* need to enable APIC before unmasking NMI */
342 apic_reg_ops
->apic_write(APIC_SPUR_INT_REG
, svr
);
345 * Presence of an invalid vector with delivery mode AV_FIXED can
346 * cause an error interrupt, even if the entry is masked...so
347 * write a valid vector to LVT entries along with the mask bit
350 /* All APICs have timer and LINT0/1 */
351 apic_reg_ops
->apic_write(APIC_LOCAL_TIMER
, AV_MASK
|APIC_RESV_IRQ
);
352 apic_reg_ops
->apic_write(APIC_INT_VECT0
, AV_MASK
|APIC_RESV_IRQ
);
353 apic_reg_ops
->apic_write(APIC_INT_VECT1
, AV_NMI
); /* enable NMI */
356 * On integrated APICs, the number of LVT entries is
357 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
358 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
361 if (apic_cpus
[cpun
].aci_local_ver
< APIC_INTEGRATED_VERS
) {
364 nlvt
= ((apic_reg_ops
->apic_read(APIC_VERS_REG
) >> 16) &
369 /* Enable performance counter overflow interrupt */
371 if (!is_x86_feature(x86_featureset
, X86FSET_MSR
))
372 apic_enable_cpcovf_intr
= 0;
373 if (apic_enable_cpcovf_intr
) {
374 if (apic_cpcovf_vect
== 0) {
375 int ipl
= APIC_PCINT_IPL
;
376 int irq
= apic_get_ipivect(ipl
, -1);
380 apic_irq_table
[irq
]->airq_vector
;
381 ASSERT(apic_cpcovf_vect
);
382 (void) add_avintr(NULL
, ipl
,
383 (avfunc
)kcpc_hw_overflow_intr
,
384 "apic pcint", irq
, NULL
, NULL
, NULL
, NULL
);
385 kcpc_hw_overflow_intr_installed
= 1;
386 kcpc_hw_enable_cpc_intr
=
387 apic_cpcovf_mask_clear
;
389 apic_reg_ops
->apic_write(APIC_PCINT_VECT
,
395 /* Only mask TM intr if the BIOS apparently doesn't use it */
399 lvtval
= apic_reg_ops
->apic_read(APIC_THERM_VECT
);
400 if (((lvtval
& AV_MASK
) == AV_MASK
) ||
401 ((lvtval
& AV_DELIV_MODE
) != AV_SMI
)) {
402 apic_reg_ops
->apic_write(APIC_THERM_VECT
,
403 AV_MASK
|APIC_RESV_IRQ
);
407 /* Enable error interrupt */
409 if (nlvt
>= 4 && apic_enable_error_intr
) {
410 if (apic_errvect
== 0) {
411 int ipl
= 0xf; /* get highest priority intr */
412 int irq
= apic_get_ipivect(ipl
, -1);
415 apic_errvect
= apic_irq_table
[irq
]->airq_vector
;
416 ASSERT(apic_errvect
);
418 * Not PSMI compliant, but we are going to merge
421 (void) add_avintr(NULL
, ipl
,
422 (avfunc
)apic_error_intr
, "apic error intr",
423 irq
, NULL
, NULL
, NULL
, NULL
);
425 apic_reg_ops
->apic_write(APIC_ERR_VECT
, apic_errvect
);
426 apic_reg_ops
->apic_write(APIC_ERROR_STATUS
, 0);
427 apic_reg_ops
->apic_write(APIC_ERROR_STATUS
, 0);
430 /* Enable CMCI interrupt */
431 if (cmi_enable_cmci
) {
433 mutex_enter(&cmci_cpu_setup_lock
);
434 if (cmci_cpu_setup_registered
== 0) {
435 mutex_enter(&cpu_lock
);
436 register_cpu_setup_func(cmci_cpu_setup
, NULL
);
437 mutex_exit(&cpu_lock
);
438 cmci_cpu_setup_registered
= 1;
440 mutex_exit(&cmci_cpu_setup_lock
);
442 if (apic_cmci_vect
== 0) {
444 int irq
= apic_get_ipivect(ipl
, -1);
447 apic_cmci_vect
= apic_irq_table
[irq
]->airq_vector
;
448 ASSERT(apic_cmci_vect
);
450 (void) add_avintr(NULL
, ipl
,
451 (avfunc
)cmi_cmci_trap
,
452 "apic cmci intr", irq
, NULL
, NULL
, NULL
, NULL
);
454 apic_reg_ops
->apic_write(APIC_CMCI_VECT
, apic_cmci_vect
);
465 * Initialize and enable interrupt remapping before apic
466 * hardware initialization
468 apic_intrmap_init(apic_mode
);
471 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
472 * bit on without clearing it with EOI. Since softint
473 * uses vector 0x20 to interrupt itself, so softint will
474 * not work on this machine. In order to fix this problem
475 * a check is made to verify all the isr bits are clear.
476 * If not, EOIs are issued to clear the bits.
478 for (i
= 7; i
>= 1; i
--) {
479 isr
= apic_reg_ops
->apic_read(APIC_ISR_REG
+ (i
* 4));
481 for (j
= 0; ((j
< 32) && (isr
!= 0)); j
++)
482 if (isr
& (1 << j
)) {
483 apic_reg_ops
->apic_write(
486 apic_error
|= APIC_ERR_BOOT_EOI
;
490 /* set a flag so we know we have run apic_picinit() */
491 apic_picinit_called
= 1;
492 LOCK_INIT_CLEAR(&apic_gethrtime_lock
);
493 LOCK_INIT_CLEAR(&apic_ioapic_lock
);
494 LOCK_INIT_CLEAR(&apic_error_lock
);
495 LOCK_INIT_CLEAR(&apic_mode_switch_lock
);
497 picsetup(); /* initialise the 8259 */
499 /* add nmi handler - least priority nmi handler */
500 LOCK_INIT_CLEAR(&apic_nmi_lock
);
502 if (!psm_add_nmintr(0, (avfunc
) apic_nmi_intr
,
503 "pcplusmp NMI handler", NULL
))
504 cmn_err(CE_WARN
, "pcplusmp: Unable to add nmi handler");
507 * Check for directed-EOI capability in the local APIC.
509 if (apic_directed_EOI_supported() == 1) {
510 apic_set_directed_EOI_handler();
515 /* enable apic mode if imcr present */
517 outb(APIC_IMCR_P1
, (uchar_t
)APIC_IMCR_SELECT
);
518 outb(APIC_IMCR_P2
, (uchar_t
)APIC_IMCR_APIC
);
521 ioapic_init_intr(IOAPIC_MASK
);
532 * platform_intr_enter
534 * Called at the beginning of the interrupt service routine to
535 * mask all level equal to and below the interrupt priority
536 * of the interrupting vector. An EOI should be given to
537 * the interrupt controller to enable other HW interrupts.
539 * Return -1 for spurious interrupts
544 apic_intr_enter(int ipl
, int *vectorp
)
550 apic_cpus_info_t
*cpu_infop
;
553 * The real vector delivered is (*vectorp + 0x20), but our caller
554 * subtracts 0x20 from the vector before passing it to us.
555 * (That's why APIC_BASE_VECT is 0x20.)
557 vector
= (uchar_t
)*vectorp
;
559 /* if interrupted by the clock, increment apic_nsec_since_boot */
560 if (vector
== apic_clkvect
) {
562 /* NOTE: this is not MT aware */
564 apic_nsec_since_boot
+= apic_nsec_per_intr
;
566 last_count_read
= apic_hertz_count
;
567 apic_redistribute_compute();
570 /* We will avoid all the book keeping overhead for clock */
571 nipl
= apic_ipls
[vector
];
573 *vectorp
= apic_vector_to_irq
[vector
+ APIC_BASE_VECT
];
575 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[nipl
]);
576 apic_reg_ops
->apic_send_eoi(0);
581 cpu_infop
= &apic_cpus
[psm_get_cpu_id()];
583 if (vector
== (APIC_SPUR_INTR
- APIC_BASE_VECT
)) {
584 cpu_infop
->aci_spur_cnt
++;
585 return (APIC_INT_SPURIOUS
);
588 /* Check if the vector we got is really what we need */
589 if (apic_revector_pending
) {
591 * Disable interrupts for the duration of
592 * the vector translation to prevent a self-race for
593 * the apic_revector_lock. This cannot be done
594 * in apic_xlate_vector because it is recursive and
595 * we want the vector translation to be atomic with
596 * respect to other (higher-priority) interrupts.
598 iflag
= intr_clear();
599 vector
= apic_xlate_vector(vector
+ APIC_BASE_VECT
) -
604 nipl
= apic_ipls
[vector
];
605 *vectorp
= irq
= apic_vector_to_irq
[vector
+ APIC_BASE_VECT
];
607 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[nipl
]);
609 cpu_infop
->aci_current
[nipl
] = (uchar_t
)irq
;
610 cpu_infop
->aci_curipl
= (uchar_t
)nipl
;
611 cpu_infop
->aci_ISR_in_progress
|= 1 << nipl
;
614 * apic_level_intr could have been assimilated into the irq struct.
615 * but, having it as a character array is more efficient in terms of
616 * cache usage. So, we leave it as is.
618 if (!apic_level_intr
[irq
]) {
619 apic_reg_ops
->apic_send_eoi(0);
623 APIC_DEBUG_BUF_PUT(vector
);
624 APIC_DEBUG_BUF_PUT(irq
);
625 APIC_DEBUG_BUF_PUT(nipl
);
626 APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
627 if ((apic_stretch_interrupts
) && (apic_stretch_ISR
& (1 << nipl
)))
628 drv_usecwait(apic_stretch_interrupts
);
630 if (apic_break_on_cpu
== psm_get_cpu_id())
637 * This macro is a common code used by MMIO local apic and X2APIC
640 #define APIC_INTR_EXIT() \
642 cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
643 if (apic_level_intr[irq]) \
644 apic_reg_ops->apic_send_eoi(irq); \
645 cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
646 /* ISR above current pri could not be in progress */ \
647 cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
651 * Any changes made to this function must also change X2APIC
652 * version of intr_exit.
655 apic_intr_exit(int prev_ipl
, int irq
)
657 apic_cpus_info_t
*cpu_infop
;
659 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[prev_ipl
]);
665 * Same as apic_intr_exit() except it uses MSR rather than MMIO
666 * to access local apic registers.
669 x2apic_intr_exit(int prev_ipl
, int irq
)
671 apic_cpus_info_t
*cpu_infop
;
673 X2APIC_WRITE(APIC_TASK_REG
, apic_ipltopri
[prev_ipl
]);
678 psm_intr_exit_fn(void)
680 if (apic_mode
== LOCAL_X2APIC
)
681 return (x2apic_intr_exit
);
683 return (apic_intr_exit
);
687 * Mask all interrupts below or equal to the given IPL.
688 * Any changes made to this function must also change X2APIC
694 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[ipl
]);
696 /* interrupts at ipl above this cannot be in progress */
697 apic_cpus
[psm_get_cpu_id()].aci_ISR_in_progress
&= (2 << ipl
) - 1;
699 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
700 * have enough time to come in before the priority is raised again
701 * during the idle() loop.
703 if (apic_setspl_delay
)
704 (void) apic_reg_ops
->apic_get_pri();
708 * X2APIC version of setspl.
709 * Mask all interrupts below or equal to the given IPL
712 x2apic_setspl(int ipl
)
714 X2APIC_WRITE(APIC_TASK_REG
, apic_ipltopri
[ipl
]);
716 /* interrupts at ipl above this cannot be in progress */
717 apic_cpus
[psm_get_cpu_id()].aci_ISR_in_progress
&= (2 << ipl
) - 1;
/*
 * PSM addspl entry point: delegate to the shared implementation in
 * apic_common.
 */
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
}
/*
 * PSM delspl entry point: delegate to the shared implementation in
 * apic_common.
 */
static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_delspl_common(irqno, ipl, min_ipl, max_ipl));
}
734 apic_post_cpu_start(void)
737 static int cpus_started
= 1;
739 /* We know this CPU + BSP started successfully. */
743 * On BSP we would have enabled X2APIC, if supported by processor,
744 * in acpi_probe(), but on AP we do it here.
746 * We enable X2APIC mode only if BSP is running in X2APIC & the
747 * local APIC mode of the current CPU is MMIO (xAPIC).
749 if (apic_mode
== LOCAL_X2APIC
&& apic_detect_x2apic() &&
750 apic_local_mode() == LOCAL_APIC
) {
751 apic_enable_x2apic();
755 * Switch back to x2apic IPI sending method for performance when target
756 * CPU has entered x2apic mode.
758 if (apic_mode
== LOCAL_X2APIC
) {
759 apic_switch_ipi_callback(B_FALSE
);
762 splx(ipltospl(LOCK_LEVEL
));
766 * since some systems don't enable the internal cache on the non-boot
767 * cpus, so we have to enable them here
769 setcr0(getcr0() & ~(CR0_CD
| CR0_NW
));
772 APIC_AV_PENDING_SET();
774 if (apic_mode
== LOCAL_APIC
)
775 APIC_AV_PENDING_SET();
779 * We may be booting, or resuming from suspend; aci_status will
780 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
781 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
783 cpun
= psm_get_cpu_id();
784 apic_cpus
[cpun
].aci_status
|= APIC_CPU_ONLINE
;
786 apic_reg_ops
->apic_write(APIC_DIVIDE_REG
, apic_divide_reg_init
);
787 return (PSM_SUCCESS
);
791 * type == -1 indicates it is an internal request. Do not change
792 * resv_vector for these requests
795 apic_get_ipivect(int ipl
, int type
)
800 if ((irq
= apic_allocate_irq(APIC_VECTOR(ipl
))) != -1) {
801 if ((vector
= apic_allocate_vector(ipl
, irq
, 1))) {
802 apic_irq_table
[irq
]->airq_mps_intr_index
=
804 apic_irq_table
[irq
]->airq_vector
= vector
;
806 apic_resv_vector
[ipl
] = vector
;
811 apic_error
|= APIC_ERR_GET_IPIVECT_FAIL
;
812 return (-1); /* shouldn't happen */
816 apic_getclkirq(int ipl
)
820 if ((irq
= apic_get_ipivect(ipl
, -1)) == -1)
823 * Note the vector in apic_clkvect for per clock handling.
825 apic_clkvect
= apic_irq_table
[irq
]->airq_vector
- APIC_BASE_VECT
;
826 APIC_VERBOSE_IOAPIC((CE_NOTE
, "get_clkirq: vector = %x\n",
832 * Try and disable all interrupts. We just assign interrupts to other
833 * processors based on policy. If any were bound by user request, we
834 * let them continue and return failure. We do not bother to check
835 * for cache affinity while rebinding.
839 apic_disable_intr(processorid_t cpun
)
841 int bind_cpu
= 0, i
, hardbound
= 0;
845 iflag
= intr_clear();
846 lock_set(&apic_ioapic_lock
);
848 for (i
= 0; i
<= APIC_MAX_VECTOR
; i
++) {
849 if (apic_reprogram_info
[i
].done
== B_FALSE
) {
850 if (apic_reprogram_info
[i
].bindcpu
== cpun
) {
852 * CPU is busy -- it's the target of
853 * a pending reprogramming attempt
855 lock_clear(&apic_ioapic_lock
);
857 return (PSM_FAILURE
);
862 apic_cpus
[cpun
].aci_status
&= ~APIC_CPU_INTR_ENABLE
;
864 apic_cpus
[cpun
].aci_curipl
= 0;
866 i
= apic_min_device_irq
;
867 for (; i
<= apic_max_device_irq
; i
++) {
869 * If there are bound interrupts on this cpu, then
870 * rebind them to other processors.
872 if ((irq_ptr
= apic_irq_table
[i
]) != NULL
) {
873 ASSERT((irq_ptr
->airq_temp_cpu
== IRQ_UNBOUND
) ||
874 (irq_ptr
->airq_temp_cpu
== IRQ_UNINIT
) ||
875 (apic_cpu_in_range(irq_ptr
->airq_temp_cpu
)));
877 if (irq_ptr
->airq_temp_cpu
== (cpun
| IRQ_USER_BOUND
)) {
882 if (irq_ptr
->airq_temp_cpu
== cpun
) {
885 apic_find_cpu(APIC_CPU_INTR_ENABLE
);
886 } while (apic_rebind_all(irq_ptr
, bind_cpu
));
891 lock_clear(&apic_ioapic_lock
);
895 cmn_err(CE_WARN
, "Could not disable interrupts on %d"
896 "due to user bound interrupts", cpun
);
897 return (PSM_FAILURE
);
900 return (PSM_SUCCESS
);
904 * Bind interrupts to the CPU's local APIC.
905 * Interrupts should not be bound to a CPU's local APIC until the CPU
906 * is ready to receive interrupts.
909 apic_enable_intr(processorid_t cpun
)
915 iflag
= intr_clear();
916 lock_set(&apic_ioapic_lock
);
918 apic_cpus
[cpun
].aci_status
|= APIC_CPU_INTR_ENABLE
;
920 i
= apic_min_device_irq
;
921 for (i
= apic_min_device_irq
; i
<= apic_max_device_irq
; i
++) {
922 if ((irq_ptr
= apic_irq_table
[i
]) != NULL
) {
923 if ((irq_ptr
->airq_cpu
& ~IRQ_USER_BOUND
) == cpun
) {
924 (void) apic_rebind_all(irq_ptr
,
930 if (apic_cpus
[cpun
].aci_status
& APIC_CPU_SUSPEND
)
931 apic_cpus
[cpun
].aci_status
&= ~APIC_CPU_SUSPEND
;
933 lock_clear(&apic_ioapic_lock
);
938 * If this module needs a periodic handler for the interrupt distribution, it
939 * can be added here. The argument to the periodic handler is not currently
940 * used, but is reserved for future.
943 apic_post_cyclic_setup(void *arg
)
945 _NOTE(ARGUNUSED(arg
))
950 /* cpu_lock is held */
951 /* set up a periodic handler for intr redistribution */
954 * In peridoc mode intr redistribution processing is done in
955 * apic_intr_enter during clk intr processing
961 * Register a periodical handler for the redistribution processing.
962 * Though we would generally prefer to use the DDI interface for
963 * periodic handler invocation, ddi_periodic_add(9F), we are
964 * unfortunately already holding cpu_lock, which ddi_periodic_add will
965 * attempt to take for us. Thus, we add our own cyclic directly:
967 cyh
.cyh_func
= (void (*)(void *))apic_redistribute_compute
;
969 cyh
.cyh_level
= CY_LOW_LEVEL
;
972 cyt
.cyt_interval
= apic_redistribute_sample_interval
;
974 apic_cyclic_id
= cyclic_add(&cyh
, &cyt
);
978 apic_redistribute_compute(void)
982 if (apic_enable_dynamic_migration
) {
983 if (++apic_nticks
== apic_sample_factor_redistribution
) {
985 * Time to call apic_intr_redistribute().
986 * reset apic_nticks. This will cause max_busy
987 * to be calculated below and if it is more than
988 * apic_int_busy, we will do the whole thing
993 for (i
= 0; i
< apic_nproc
; i
++) {
994 if (!apic_cpu_in_range(i
))
998 * Check if curipl is non zero & if ISR is in
1001 if (((j
= apic_cpus
[i
].aci_curipl
) != 0) &&
1002 (apic_cpus
[i
].aci_ISR_in_progress
& (1 << j
))) {
1005 apic_cpus
[i
].aci_busy
++;
1006 irq
= apic_cpus
[i
].aci_current
[j
];
1007 apic_irq_table
[irq
]->airq_busy
++;
1011 (apic_cpus
[i
].aci_busy
> max_busy
))
1012 max_busy
= apic_cpus
[i
].aci_busy
;
1015 if (max_busy
> apic_int_busy_mark
) {
1017 * We could make the following check be
1018 * skipped > 1 in which case, we get a
1019 * redistribution at half the busy mark (due to
1020 * double interval). Need to be able to collect
1021 * more empirical data to decide if that is a
1022 * good strategy. Punt for now.
1024 if (apic_skipped_redistribute
) {
1025 apic_cleanup_busy();
1026 apic_skipped_redistribute
= 0;
1028 apic_intr_redistribute();
1031 apic_skipped_redistribute
++;
1038 * The following functions are in the platform specific file so that they
1039 * can be different functions depending on whether we are running on
1040 * bare metal or a hypervisor.
1044 * Check to make sure there are enough irq slots
1047 apic_check_free_irqs(int count
)
1052 for (i
= APIC_FIRST_FREE_IRQ
; i
< APIC_RESV_IRQ
; i
++) {
1053 if ((apic_irq_table
[i
] == NULL
) ||
1054 apic_irq_table
[i
]->airq_mps_intr_index
== FREE_INDEX
) {
1055 if (++avail
>= count
)
1056 return (PSM_SUCCESS
);
1059 return (PSM_FAILURE
);
1063 * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
1066 apic_alloc_msi_vectors(dev_info_t
*dip
, int inum
, int count
, int pri
,
1070 uchar_t start
, irqno
;
1075 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: dip=0x%p "
1076 "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
1077 (void *)dip
, inum
, pri
, count
, behavior
));
1080 if (behavior
== DDI_INTR_ALLOC_STRICT
&&
1081 apic_multi_msi_enable
== 0)
1083 if (apic_multi_msi_enable
== 0)
1087 if ((rcount
= apic_navail_vector(dip
, pri
)) > count
)
1089 else if (rcount
== 0 || (rcount
< count
&&
1090 behavior
== DDI_INTR_ALLOC_STRICT
))
1093 /* if not ISP2, then round it down */
1095 rcount
= 1 << (highbit(rcount
) - 1);
1097 mutex_enter(&airq_mutex
);
1099 for (start
= 0; rcount
> 0; rcount
>>= 1) {
1100 if ((start
= apic_find_multi_vectors(pri
, rcount
)) != 0 ||
1101 behavior
== DDI_INTR_ALLOC_STRICT
)
1106 /* no vector available */
1107 mutex_exit(&airq_mutex
);
1111 if (apic_check_free_irqs(rcount
) == PSM_FAILURE
) {
1112 /* not enough free irq slots available */
1113 mutex_exit(&airq_mutex
);
1117 major
= (dip
!= NULL
) ? ddi_driver_major(dip
) : 0;
1118 for (i
= 0; i
< rcount
; i
++) {
1119 if ((irqno
= apic_allocate_irq(apic_first_avail_irq
)) ==
1122 * shouldn't happen because of the
1123 * apic_check_free_irqs() check earlier
1125 mutex_exit(&airq_mutex
);
1126 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: "
1127 "apic_allocate_irq failed\n"));
1130 apic_max_device_irq
= max(irqno
, apic_max_device_irq
);
1131 apic_min_device_irq
= min(irqno
, apic_min_device_irq
);
1132 irqptr
= apic_irq_table
[irqno
];
1134 if (apic_vector_to_irq
[start
+ i
] != APIC_RESV_IRQ
)
1135 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: "
1136 "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
1138 apic_vector_to_irq
[start
+ i
] = (uchar_t
)irqno
;
1140 irqptr
->airq_vector
= (uchar_t
)(start
+ i
);
1141 irqptr
->airq_ioapicindex
= (uchar_t
)inum
; /* start */
1142 irqptr
->airq_intin_no
= (uchar_t
)rcount
;
1143 irqptr
->airq_ipl
= pri
;
1144 irqptr
->airq_vector
= start
+ i
;
1145 irqptr
->airq_origirq
= (uchar_t
)(inum
+ i
);
1146 irqptr
->airq_share_id
= 0;
1147 irqptr
->airq_mps_intr_index
= MSI_INDEX
;
1148 irqptr
->airq_dip
= dip
;
1149 irqptr
->airq_major
= major
;
1150 if (i
== 0) /* they all bound to the same cpu */
1151 cpu
= irqptr
->airq_cpu
= apic_bind_intr(dip
, irqno
,
1154 irqptr
->airq_cpu
= cpu
;
1155 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msi_vectors: irq=0x%x "
1156 "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno
,
1157 (void *)irqptr
->airq_dip
, irqptr
->airq_vector
,
1158 irqptr
->airq_origirq
, pri
));
1160 mutex_exit(&airq_mutex
);
1165 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
1168 apic_alloc_msix_vectors(dev_info_t
*dip
, int inum
, int count
, int pri
,
1174 mutex_enter(&airq_mutex
);
1176 if ((rcount
= apic_navail_vector(dip
, pri
)) > count
)
1178 else if (rcount
== 0 || (rcount
< count
&&
1179 behavior
== DDI_INTR_ALLOC_STRICT
)) {
1184 if (apic_check_free_irqs(rcount
) == PSM_FAILURE
) {
1185 /* not enough free irq slots available */
1190 major
= (dip
!= NULL
) ? ddi_driver_major(dip
) : 0;
1191 for (i
= 0; i
< rcount
; i
++) {
1192 uchar_t vector
, irqno
;
1195 if ((irqno
= apic_allocate_irq(apic_first_avail_irq
)) ==
1198 * shouldn't happen because of the
1199 * apic_check_free_irqs() check earlier
1201 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msix_vectors: "
1202 "apic_allocate_irq failed\n"));
1206 if ((vector
= apic_allocate_vector(pri
, irqno
, 1)) == 0) {
1208 * shouldn't happen because of the
1209 * apic_navail_vector() call earlier
1211 DDI_INTR_IMPLDBG((CE_CONT
, "apic_alloc_msix_vectors: "
1212 "apic_allocate_vector failed\n"));
1216 apic_max_device_irq
= max(irqno
, apic_max_device_irq
);
1217 apic_min_device_irq
= min(irqno
, apic_min_device_irq
);
1218 irqptr
= apic_irq_table
[irqno
];
1219 irqptr
->airq_vector
= (uchar_t
)vector
;
1220 irqptr
->airq_ipl
= pri
;
1221 irqptr
->airq_origirq
= (uchar_t
)(inum
+ i
);
1222 irqptr
->airq_share_id
= 0;
1223 irqptr
->airq_mps_intr_index
= MSIX_INDEX
;
1224 irqptr
->airq_dip
= dip
;
1225 irqptr
->airq_major
= major
;
1226 irqptr
->airq_cpu
= apic_bind_intr(dip
, irqno
, 0xff, 0xff);
1229 mutex_exit(&airq_mutex
);
1234 * Allocate a free vector for irq at ipl. Takes care of merging of multiple
1235 * IPLs into a single APIC level as well as stretching some IPLs onto multiple
1236 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
1237 * requests and allocated only when pri is set.
1240 apic_allocate_vector(int ipl
, int irq
, int pri
)
1242 int lowest
, highest
, i
;
1244 highest
= apic_ipltopri
[ipl
] + APIC_VECTOR_MASK
;
1245 lowest
= apic_ipltopri
[ipl
- 1] + APIC_VECTOR_PER_IPL
;
1247 if (highest
< lowest
) /* Both ipl and ipl - 1 map to same pri */
1248 lowest
-= APIC_VECTOR_PER_IPL
;
1251 if (apic_restrict_vector
) /* for testing shared interrupt logic */
1252 highest
= lowest
+ apic_restrict_vector
+ APIC_HI_PRI_VECTS
;
1255 highest
-= APIC_HI_PRI_VECTS
;
1257 for (i
= lowest
; i
<= highest
; i
++) {
1258 if (APIC_CHECK_RESERVE_VECTORS(i
))
1260 if (apic_vector_to_irq
[i
] == APIC_RESV_IRQ
) {
1261 apic_vector_to_irq
[i
] = (uchar_t
)irq
;
1269 /* Mark vector as not being used by any irq */
1271 apic_free_vector(uchar_t vector
)
1273 apic_vector_to_irq
[vector
] = APIC_RESV_IRQ
;
1277 * Call rebind to do the actual programming.
1278 * Must be called with interrupts disabled and apic_ioapic_lock held
1279 * 'p' is polymorphic -- if this function is called to process a deferred
1280 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
1281 * the irq pointer is retrieved. If not doing deferred reprogramming,
1282 * p is of the type 'apic_irq_t *'.
1284 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
1285 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
1286 * taken offline after a cpu is selected, but before apic_rebind is called to
1287 * bind interrupts to it.
1290 apic_setup_io_intr(void *p
, int irq
, boolean_t deferred
)
1293 struct ioapic_reprogram_data
*drep
= NULL
;
1297 drep
= (struct ioapic_reprogram_data
*)p
;
1298 ASSERT(drep
!= NULL
);
1299 irqptr
= drep
->irqp
;
1301 irqptr
= (apic_irq_t
*)p
;
1303 ASSERT(irqptr
!= NULL
);
1305 rv
= apic_rebind(irqptr
, apic_irq_table
[irq
]->airq_cpu
, drep
);
1308 * CPU is not up or interrupts are disabled. Fall back to
1309 * the first available CPU
1311 rv
= apic_rebind(irqptr
, apic_find_cpu(APIC_CPU_INTR_ENABLE
),
1320 apic_modify_vector(uchar_t vector
, int irq
)
1322 apic_vector_to_irq
[vector
] = (uchar_t
)irq
;
1327 apic_get_apic_type(void)
1329 return (apic_psm_info
.p_mach_idstring
);
1333 x2apic_update_psm(void)
1335 struct psm_ops
*pops
= &apic_ops
;
1337 ASSERT(pops
!= NULL
);
1339 pops
->psm_intr_exit
= x2apic_intr_exit
;
1340 pops
->psm_setspl
= x2apic_setspl
;
1342 pops
->psm_send_ipi
= x2apic_send_ipi
;
1343 send_dirintf
= pops
->psm_send_ipi
;
1345 apic_mode
= LOCAL_X2APIC
;