/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */
/*
 * To understand how the pcplusmp module interacts with the interrupt subsystem
 * read the theory statement in uts/i86pc/os/intr.c.
 */
/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 * PSMI 1.6 extensions are supported in Solaris Nevada.
 * PSMI 1.7 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_7
#include <sys/processor.h>
#include <sys/smp_impldefs.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/pci_intr_lib.h>
#include <sys/clock.h>
#include <sys/cyclic.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/reboot.h>
#include <sys/apic_common.h>
#include <sys/apic_timer.h>
/*
 *	Local Function Prototypes
 */
static void apic_init_intr(void);
static int apic_probe(void);
static int apic_getclkirq(int ipl);
static void apic_init(void);
static void apic_picinit(void);
static int apic_post_cpu_start(void);
static int apic_intr_enter(int ipl, int *vect);
static void apic_setspl(int ipl);
static void x2apic_setspl(int ipl);
static int apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
static int apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
static int apic_disable_intr(processorid_t cpun);
static void apic_enable_intr(processorid_t cpun);
static int apic_get_ipivect(int ipl, int type);
static void apic_post_cyclic_setup(void *arg);
/*
 * The following vector assignments influence the value of ipltopri and
 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
 * we care to do so in future. Note some IPLs which are rarely used
 * will share the vector ranges and heavily used IPLs (5 and 6) have
 * a wide range.
 *
 * This array is used to initialize apic_ipls[] (in apic_init()).
 *
 *	IPL	Vector range.		as passed to intr_enter
 *	0	none.
 *	1,2,3	0x20-0x2f		0x0-0xf
 *	4	0x30-0x3f		0x10-0x1f
 *	5	0x40-0x5f		0x20-0x3f
 *	6	0x60-0x7f		0x40-0x5f
 *	7,8,9	0x80-0x8f		0x60-0x6f
 *	10	0x90-0x9f		0x70-0x7f
 *	11	0xa0-0xaf		0x80-0x8f
 *	12	0xb0-0xbf		0x90-0x9f
 *	13	0xc0-0xcf		0xa0-0xaf
 *	14	0xd0-0xdf		0xb0-0xbf
 *	15	0xe0-0xef		0xc0-0xcf
 *	15	0xf0-0xff		0xd0-0xdf
 */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
/*
 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
 * NOTE that this is vector as passed into intr_enter which is
 * programmed vector - 0x20 (APIC_BASE_VECT)
 */
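/*
 * For example, assuming the usual values APIC_BASE_VECT == 0x20 and
 * APIC_VECTOR_PER_IPL == 0x10 implied by the table above: a handler
 * programmed at hardware vector 0x45 reaches intr_enter as
 * 0x45 - 0x20 == 0x25, and apic_vectortoipl[0x25 >> 4] ==
 * apic_vectortoipl[2] == 5, so it runs at IPL 5.
 */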
uchar_t	apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri	*/
	/* The taskpri to be programmed into apic to mask given ipl */
/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs.
 */
uchar_t apic_ipls[APIC_AVAIL_VECTOR];
/*
 * Patchable global variables.
 */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable */
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */
/*
 *	Local static data
 */
static struct psm_ops apic_ops = {
	apic_probe,

	apic_init,				/* psm_softinit */
	apic_picinit,
	apic_intr_enter,
	apic_intr_exit,
	apic_setspl,
	apic_addspl,
	apic_delspl,
	apic_disable_intr,
	apic_enable_intr,
	(int (*)(int))NULL,			/* psm_softlvl_to_irq */
	(void (*)(int))NULL,			/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apic_getclkirq,
	(void (*)(void))NULL,			/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apic_post_cpu_start,
	apic_shutdown,
	apic_get_ipivect,
	apic_send_ipi,

	(int (*)(dev_info_t *, int))NULL,	/* psm_translate_irq */
	(void (*)(int, char *))NULL,		/* psm_notify_error */
	(void (*)(int))NULL,			/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apic_post_cyclic_setup,
	apic_preshutdown,
	apic_intr_ops,		/* Advanced DDI Interrupt framework */
	apic_state,		/* save, restore apic state for S3 */
	apic_cpu_ops,		/* CPU control interface. */
};
struct psm_ops *psmops = &apic_ops;
static struct psm_info apic_psm_info = {
	PSM_INFO_VER01_7,			/* version */
	PSM_OWN_EXCLUSIVE,			/* ownership */
	(struct psm_ops *)&apic_ops,		/* operation */
	APIC_PCPLUSMP_NAME,			/* machine name */
	"pcplusmp v1.4 compatible",
};

static void *apic_hdlp;
/* to gather intr data and redistribute */
static void apic_redistribute_compute(void);

/*
 *	This is the loadable module wrapper
 */
int
_init(void)
{
	if (apic_coarse_hrtime)
		apic_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apic_hdlp, &apic_psm_info));
}
int
_fini(void)
{
	return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
}
int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
}
static int
apic_probe(void)
{
	/* check if apix is initialized */
	if (apix_enable && apix_loaded())
		return (PSM_FAILURE);
	else
		apix_enable = 0; /* continue using pcplusmp PSM */

	return (apic_probe_common(apic_psm_info.p_mach_idstring));
}
static uchar_t
apic_xlate_vector_by_irq(uchar_t irq)
{
	if (apic_irq_table[irq] == NULL)
		return (0);

	return (apic_irq_table[irq]->airq_vector);
}
static void
apic_init(void)
{
	int i;
	int j = 1;

	psm_get_ioapicid = apic_get_ioapicid;
	psm_get_localapicid = apic_get_localapicid;
	psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;

	apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
	for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
		if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
		    (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
			/* get to highest vector at the same ipl */
			continue;
		for (; j <= apic_vectortoipl[i]; j++) {
			apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
			    APIC_BASE_VECT;
		}
	}
	for (; j < MAXIPL + 1; j++)
		/* fill up any empty ipltopri slots */
		apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
	apic_init_common();
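	/*
	 * For example: IPL 5 owns indices 2 and 3 of apic_vectortoipl, so
	 * the loop above lands on i == 3 and (assuming the usual
	 * APIC_IPL_SHIFT of 4) sets apic_ipltopri[5] = (3 << 4) +
	 * APIC_BASE_VECT == 0x50; programming 0x50 into the task priority
	 * register masks all vectors up through 0x5f, i.e. every IPL 5
	 * source.
	 */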
#if !defined(__amd64)
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif
}
static void
apic_init_intr(void)
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
	}
	if (apic_directed_EOI_supported()) {
		/*
		 * Setting the 12th bit in the Spurious Interrupt Vector
		 * Register suppresses broadcast EOIs generated by the local
		 * APIC. The suppression of broadcast EOIs happens only when
		 * interrupts are level-triggered.
		 */
		svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
	}

	/* need to enable APIC before unmasking NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);
	/*
	 * Presence of an invalid vector with delivery mode AV_FIXED can
	 * cause an error interrupt, even if the entry is masked...so
	 * write a valid vector to LVT entries along with the mask bit
	 */

	/* All APICs have timer and LINT0/1 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);	/* enable NMI */
	/*
	 * On integrated APICs, the number of LVT entries is
	 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
	 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
	 */
	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
		    0xFF) + 1;
	}
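	/*
	 * For example, a version register reading 0x06 in bits 16-23
	 * ("Max LVT entry") yields nlvt == 7, which satisfies the
	 * nlvt >= 5 (performance counter) and nlvt >= 6 (thermal)
	 * checks below.
	 */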
	if (nlvt >= 5) {
		/* Enable performance counter overflow interrupt */

		if (!is_x86_feature(x86_featureset, X86FSET_MSR))
			apic_enable_cpcovf_intr = 0;
		if (apic_enable_cpcovf_intr) {
			if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;
				int irq = apic_get_ipivect(ipl, -1);

				apic_cpcovf_vect =
				    apic_irq_table[irq]->airq_vector;
				ASSERT(apic_cpcovf_vect);
				(void) add_avintr(NULL, ipl,
				    (avfunc)kcpc_hw_overflow_intr,
				    "apic pcint", irq, NULL, NULL, NULL, NULL);
				kcpc_hw_overflow_intr_installed = 1;
				kcpc_hw_enable_cpc_intr =
				    apic_cpcovf_mask_clear;
			}
			apic_reg_ops->apic_write(APIC_PCINT_VECT,
			    apic_cpcovf_vect);
		}
	}
	if (nlvt >= 6) {
		/* Only mask TM intr if the BIOS apparently doesn't use it */

		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK|APIC_RESV_IRQ);
		}
	}
	/* Enable error interrupt */

	if (nlvt >= 4 && apic_enable_error_intr) {
		if (apic_errvect == 0) {
			int ipl = 0xf;	/* get highest priority intr */
			int irq = apic_get_ipivect(ipl, -1);

			apic_errvect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_errvect);
			/*
			 * Not PSMI compliant, but we are going to merge
			 * with ON anyway
			 */
			(void) add_avintr((void *)NULL, ipl,
			    (avfunc)apic_error_intr, "apic error intr",
			    irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	}
	/* Enable CMCI interrupt */
	if (cmi_enable_cmci) {
		mutex_enter(&cmci_cpu_setup_lock);
		if (cmci_cpu_setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(cmci_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			cmci_cpu_setup_registered = 1;
		}
		mutex_exit(&cmci_cpu_setup_lock);

		if (apic_cmci_vect == 0) {
			int ipl = 0x2;
			int irq = apic_get_ipivect(ipl, -1);

			apic_cmci_vect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_cmci_vect);

			(void) add_avintr(NULL, ipl,
			    (avfunc)cmi_cmci_trap,
			    "apic cmci intr", irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
	}
}
static void
apic_picinit(void)
{
	int i, j;
	uint_t isr;

	/*
	 * Initialize and enable interrupt remapping before apic
	 * hardware initialization
	 */
	apic_intrmap_init(apic_mode);

	/*
	 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
	 * bit on without clearing it with EOI. Since softint
	 * uses vector 0x20 to interrupt itself, softint will
	 * not work on this machine. In order to fix this problem
	 * a check is made to verify all the isr bits are clear.
	 * If not, EOIs are issued to clear the bits.
	 */
	for (i = 7; i >= 1; i--) {
		isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
		if (isr != 0)
			for (j = 0; ((j < 32) && (isr != 0)); j++)
				if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
					isr &= ~(1 << j);
					apic_error |= APIC_ERR_BOOT_EOI;
				}
	}
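	/*
	 * The indexing above reads the 256-bit in-service register 32 bits
	 * at a time; bit j of word i corresponds to vector 32 * i + j, so
	 * scanning words 7 down to 1 covers vectors 0x20 through 0xff.
	 */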
	/* set a flag so we know we have run apic_picinit() */
	apic_picinit_called = 1;

	LOCK_INIT_CLEAR(&apic_gethrtime_lock);
	LOCK_INIT_CLEAR(&apic_ioapic_lock);
	LOCK_INIT_CLEAR(&apic_error_lock);
	LOCK_INIT_CLEAR(&apic_mode_switch_lock);

	picsetup();	/* initialise the 8259 */

	/* add nmi handler - least priority nmi handler */
	LOCK_INIT_CLEAR(&apic_nmi_lock);

	if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
	    "pcplusmp NMI handler", (caddr_t)NULL))
		cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler");
	/*
	 * Check for directed-EOI capability in the local APIC.
	 */
	if (apic_directed_EOI_supported() == 1) {
		apic_set_directed_EOI_handler();
	}

	/* enable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
	}

	ioapic_init_intr(IOAPIC_MASK);
}
/*
 * platform_intr_enter
 *
 *	Called at the beginning of the interrupt service routine to
 *	mask all levels equal to and below the interrupt priority
 *	of the interrupting vector. An EOI should be given to
 *	the interrupt controller to enable other HW interrupts.
 *
 *	Return -1 for spurious interrupts
 */
static int
apic_intr_enter(int ipl, int *vectorp)
{
	uchar_t vector;
	int nipl;
	int irq;
	ulong_t iflag;
	apic_cpus_info_t *cpu_infop;

	/*
	 * The real vector delivered is (*vectorp + 0x20), but our caller
	 * subtracts 0x20 from the vector before passing it to us.
	 * (That's why APIC_BASE_VECT is 0x20.)
	 */
	vector = (uchar_t)*vectorp;
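	/*
	 * E.g. a device interrupt programmed at hardware vector 0x48
	 * arrives with *vectorp == 0x28; 0x28 indexes apic_ipls[]
	 * directly, while apic_vector_to_irq[] below is indexed by the
	 * original 0x48 (vector + APIC_BASE_VECT).
	 */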
	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == apic_clkvect) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apic_redistribute_compute();
		}

		/* We will avoid all the book keeping overhead for clock */
		nipl = apic_ipls[vector];

		*vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];

		apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
		apic_reg_ops->apic_send_eoi(0);

		return (nipl);
	}
	cpu_infop = &apic_cpus[psm_get_cpu_id()];

	if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}
	/* Check if the vector we got is really what we need */
	if (apic_revector_pending) {
		/*
		 * Disable interrupts for the duration of
		 * the vector translation to prevent a self-race for
		 * the apic_revector_lock. This cannot be done
		 * in apic_xlate_vector because it is recursive and
		 * we want the vector translation to be atomic with
		 * respect to other (higher-priority) interrupts.
		 */
		iflag = intr_clear();
		vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
		    APIC_BASE_VECT;
		intr_restore(iflag);
	}
= apic_ipls
[vector
];
597 *vectorp
= irq
= apic_vector_to_irq
[vector
+ APIC_BASE_VECT
];
599 apic_reg_ops
->apic_write_task_reg(apic_ipltopri
[nipl
]);
601 cpu_infop
->aci_current
[nipl
] = (uchar_t
)irq
;
602 cpu_infop
->aci_curipl
= (uchar_t
)nipl
;
603 cpu_infop
->aci_ISR_in_progress
|= 1 << nipl
;
	/*
	 * apic_level_intr could have been assimilated into the irq struct,
	 * but having it as a character array is more efficient in terms of
	 * cache usage. So we leave it as is.
	 */
	if (!apic_level_intr[irq]) {
		apic_reg_ops->apic_send_eoi(0);
	}
#ifdef	DEBUG
	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(irq);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());

	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);

	if (apic_break_on_cpu == psm_get_cpu_id())
		apic_break();
#endif /* DEBUG */

	return (nipl);
}
/*
 * This macro is common code used by the MMIO local apic and X2APIC
 * local apic.
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}
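/*
 * The mask arithmetic above, (2 << prev_ipl) - 1, keeps bits 0 through
 * prev_ipl and clears everything higher: for prev_ipl == 4 the mask is
 * 0x1f, dropping any in-progress bits for IPL 5 and above.
 */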
/*
 * Any changes made to this function must also change X2APIC
 * version of intr_exit.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	apic_reg_ops->apic_write_task_reg(apic_ipltopri[prev_ipl]);

	APIC_INTR_EXIT();
}
/*
 * Same as apic_intr_exit() except it uses MSR rather than MMIO
 * to access local apic registers.
 */
void
x2apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);

	APIC_INTR_EXIT();
}
intr_exit_fn_t
psm_intr_exit_fn(void)
{
	if (apic_mode == LOCAL_X2APIC)
		return (x2apic_intr_exit);

	return (apic_intr_exit);
}
/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also change X2APIC
 * version of setspl.
 */
static void
apic_setspl(int ipl)
{
	apic_reg_ops->apic_write_task_reg(apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;

	/*
	 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
	 * have enough time to come in before the priority is raised again
	 * during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}
/*
 * X2APIC version of setspl.
 * Mask all interrupts below or equal to the given IPL
 */
static void
x2apic_setspl(int ipl)
{
	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
}
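/*
 * In x2APIC mode the task priority register is reached with a WRMSR
 * (architecturally, the TPR is MSR 0x808 in x2APIC mode) rather than an
 * MMIO store, so no apic_reg_ops indirection is needed on this path.
 */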
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
}

static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_delspl_common(irqno, ipl, min_ipl, max_ipl));
}
static int
apic_post_cpu_start(void)
{
	int cpun;
	static int cpus_started = 1;

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	/*
	 * On BSP we would have enabled X2APIC, if supported by processor,
	 * in acpi_probe(), but on AP we do it here.
	 *
	 * We enable X2APIC mode only if BSP is running in X2APIC & the
	 * local APIC mode of the current CPU is MMIO (xAPIC).
	 */
	if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
	    apic_local_mode() == LOCAL_APIC) {
		apic_enable_x2apic();
	}

	/*
	 * Switch back to x2apic IPI sending method for performance when target
	 * CPU has entered x2apic mode.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_FALSE);
	}

	splx(ipltospl(LOCK_LEVEL));
	apic_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * cpus, so we have to enable it here.
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif	/* DEBUG */

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
	return (PSM_SUCCESS);
}
/*
 * type == -1 indicates it is an internal request. Do not change
 * resv_vector for these requests
 */
static int
apic_get_ipivect(int ipl, int type)
{
	uchar_t vector;
	int irq;

	if ((irq = apic_allocate_irq(APIC_VECTOR(ipl))) != -1) {
		if ((vector = apic_allocate_vector(ipl, irq, 1))) {
			apic_irq_table[irq]->airq_mps_intr_index =
			    RESERVE_INDEX;
			apic_irq_table[irq]->airq_vector = vector;
			if (type != -1) {
				apic_resv_vector[ipl] = vector;
			}
			return (irq);
		}
	}
	apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
	return (-1);	/* shouldn't happen */
}
static int
apic_getclkirq(int ipl)
{
	int irq;

	if ((irq = apic_get_ipivect(ipl, -1)) == -1)
		return (-1);
	/*
	 * Note the vector in apic_clkvect for per clock handling.
	 */
	apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT;
	APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n",
	    apic_clkvect));
	return (irq);
}
/*
 * Try and disable all interrupts. We just assign interrupts to other
 * processors based on policy. If any were bound by user request, we
 * let them continue and return failure. We do not bother to check
 * for cache affinity while rebinding.
 */
static int
apic_disable_intr(processorid_t cpun)
{
	int bind_cpu = 0, i, hardbound = 0;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		if (apic_reprogram_info[i].done == B_FALSE) {
			if (apic_reprogram_info[i].bindcpu == cpun) {
				/*
				 * CPU is busy -- it's the target of
				 * a pending reprogramming attempt
				 */
				lock_clear(&apic_ioapic_lock);
				intr_restore(iflag);
				return (PSM_FAILURE);
			}
		}
	}

	apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;

	apic_cpus[cpun].aci_curipl = 0;

	i = apic_min_device_irq;
	for (; i <= apic_max_device_irq; i++) {
		/*
		 * If there are bound interrupts on this cpu, then
		 * rebind them to other processors.
		 */
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) ||
			    (irq_ptr->airq_temp_cpu == IRQ_UNINIT) ||
			    (apic_cpu_in_range(irq_ptr->airq_temp_cpu)));

			if (irq_ptr->airq_temp_cpu ==
			    (cpun | IRQ_USER_BOUND)) {
				hardbound = 1;
				continue;
			}

			if (irq_ptr->airq_temp_cpu == cpun) {
				do {
					bind_cpu =
					    apic_find_cpu(APIC_CPU_INTR_ENABLE);
				} while (apic_rebind_all(irq_ptr, bind_cpu));
			}
		}
	}

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	if (hardbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on %d "
		    "due to user bound interrupts", cpun);
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}
/*
 * Bind interrupts to the CPU's local APIC.
 * Interrupts should not be bound to a CPU's local APIC until the CPU
 * is ready to receive interrupts.
 */
static void
apic_enable_intr(processorid_t cpun)
{
	int i;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;

	i = apic_min_device_irq;
	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) {
				(void) apic_rebind_all(irq_ptr,
				    irq_ptr->airq_cpu);
			}
		}
	}

	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND)
		apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}
/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future.
 */
static void
apic_post_cyclic_setup(void *arg)
{
	_NOTE(ARGUNUSED(arg))

	cyc_handler_t cyh;
	cyc_time_t cyt;

	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode intr redistribution processing is done in
	 * apic_intr_enter during clk intr processing
	 */
	if (!apic_oneshot)
		return;

	/*
	 * Register a periodical handler for the redistribution processing.
	 * Though we would generally prefer to use the DDI interface for
	 * periodic handler invocation, ddi_periodic_add(9F), we are
	 * unfortunately already holding cpu_lock, which ddi_periodic_add will
	 * attempt to take for us. Thus, we add our own cyclic directly:
	 */
	cyh.cyh_func = (void (*)(void *))apic_redistribute_compute;
	cyh.cyh_arg = NULL;
	cyh.cyh_level = CY_LOW_LEVEL;

	cyt.cyt_when = 0;
	cyt.cyt_interval = apic_redistribute_sample_interval;

	apic_cyclic_id = cyclic_add(&cyh, &cyt);
}
static void
apic_redistribute_compute(void)
{
	int i, j, max_busy;

	if (apic_enable_dynamic_migration) {
		if (++apic_nticks == apic_sample_factor_redistribution) {
			/*
			 * Time to call apic_intr_redistribute().
			 * reset apic_nticks. This will cause max_busy
			 * to be calculated below and if it is more than
			 * apic_int_busy, we will do the whole thing
			 */
			apic_nticks = 0;
		}
		max_busy = 0;
		for (i = 0; i < apic_nproc; i++) {
			if (!apic_cpu_in_range(i))
				continue;

			/*
			 * Check if curipl is non zero & if ISR is in
			 * progress
			 */
			if (((j = apic_cpus[i].aci_curipl) != 0) &&
			    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {
				int irq;

				apic_cpus[i].aci_busy++;
				irq = apic_cpus[i].aci_current[j];
				apic_irq_table[irq]->airq_busy++;
			}

			if (!apic_nticks &&
			    (apic_cpus[i].aci_busy > max_busy))
				max_busy = apic_cpus[i].aci_busy;
		}
		if (!apic_nticks) {
			if (max_busy > apic_int_busy_mark) {
				/*
				 * We could make the following check be
				 * skipped > 1 in which case, we get a
				 * redistribution at half the busy mark (due to
				 * double interval). Need to be able to collect
				 * more empirical data to decide if that is a
				 * good strategy. Punt for now.
				 */
				if (apic_skipped_redistribute) {
					apic_cleanup_busy();
					apic_skipped_redistribute = 0;
				} else
					apic_intr_redistribute();
			} else
				apic_skipped_redistribute++;
		}
	}
}
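/*
 * Net effect: per-CPU and per-irq busy counts accumulate for
 * apic_sample_factor_redistribution clock ticks, and a redistribution
 * pass is attempted only when the busiest CPU exceeds
 * apic_int_busy_mark; stale samples (apic_skipped_redistribute) are
 * cleaned up rather than acted on.
 */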
/*
 * The following functions are in the platform specific file so that they
 * can be different functions depending on whether we are running on
 * bare metal or a hypervisor.
 */

/*
 * Check to make sure there are enough irq slots
 */
int
apic_check_free_irqs(int count)
{
	int i, avail;

	avail = 0;
	for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) {
		if ((apic_irq_table[i] == NULL) ||
		    apic_irq_table[i]->airq_mps_intr_index == FREE_INDEX) {
			if (++avail >= count)
				return (PSM_SUCCESS);
		}
	}
	return (PSM_FAILURE);
}
/*
 * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
 */
static int
apic_alloc_msi_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int rcount, i;
	uchar_t start, irqno;
	uint32_t cpu;
	major_t major;
	apic_irq_t *irqptr;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: dip=0x%p "
	    "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
	    (void *)dip, inum, pri, count, behavior));

	if (count > 1) {
		if (behavior == DDI_INTR_ALLOC_STRICT &&
		    apic_multi_msi_enable == 0)
			return (0);
		if (apic_multi_msi_enable == 0)
			count = 1;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT))
		return (0);

	/* if not ISP2, then round it down */
	if (!ISP2(rcount))
		rcount = 1 << (highbit(rcount) - 1);
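	/*
	 * E.g. a surviving request of count == 6 is rounded down here:
	 * highbit(6) == 3, so rcount becomes 1 << 2 == 4, keeping MSI
	 * allocations at the power-of-two sizes the MSI protocol expects.
	 */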
	mutex_enter(&airq_mutex);

	for (start = 0; rcount > 0; rcount >>= 1) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
		    behavior == DDI_INTR_ALLOC_STRICT)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef	DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_vector = start + i;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSI_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0) /* they all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);

	return (rcount);
}
/*
 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
 */
static int
apic_alloc_msix_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int rcount, i;
	major_t major;

	mutex_enter(&airq_mutex);

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT)) {
		rcount = 0;
		goto out;
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		rcount = 0;
		goto out;
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		uchar_t vector, irqno;
		apic_irq_t *irqptr;

		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_irq failed\n"));
			rcount = i;
			goto out;
		}
		if ((vector = apic_allocate_vector(pri, irqno, 1)) == 0) {
			/*
			 * shouldn't happen because of the
			 * apic_navail_vector() call earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_vector failed\n"));
			rcount = i;
			goto out;
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
		irqptr->airq_vector = (uchar_t)vector;
		irqptr->airq_ipl = pri;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSIX_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		irqptr->airq_cpu = apic_bind_intr(dip, irqno, 0xff, 0xff);
	}
out:
	mutex_exit(&airq_mutex);

	return (rcount);
}
/*
 * Allocate a free vector for irq at ipl. Takes care of merging of multiple
 * IPLs into a single APIC level as well as stretching some IPLs onto multiple
 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
 * requests and allocated only when pri is set.
 */
uchar_t
apic_allocate_vector(int ipl, int irq, int pri)
{
	int lowest, highest, i;

	highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL;

	if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
		lowest -= APIC_VECTOR_PER_IPL;

#ifdef	DEBUG
	if (apic_restrict_vector)	/* for testing shared interrupt logic */
		highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS;
#endif /* DEBUG */
	if (pri == 0)
		highest -= APIC_HI_PRI_VECTS;

	for (i = lowest; i <= highest; i++) {
		if (APIC_CHECK_RESERVE_VECTORS(i))
			continue;
		if (apic_vector_to_irq[i] == APIC_RESV_IRQ) {
			apic_vector_to_irq[i] = (uchar_t)irq;
			return ((uchar_t)i);
		}
	}

	return (0);
}

/* Mark vector as not being used by any irq */
void
apic_free_vector(uchar_t vector)
{
	apic_vector_to_irq[vector] = APIC_RESV_IRQ;
}
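/*
 * Worked example for apic_allocate_vector(), using the table at the top
 * of this file (and assuming APIC_VECTOR_MASK == 0x0f): for ipl 5,
 * highest = apic_ipltopri[5] + 0x0f == 0x5f and lowest =
 * apic_ipltopri[4] + APIC_VECTOR_PER_IPL == 0x30 + 0x10 == 0x40, so the
 * search walks the IPL 5 range 0x40-0x5f, holding back the top
 * APIC_HI_PRI_VECTS entries unless pri is set.
 */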
/*
 * Call rebind to do the actual programming.
 * Must be called with interrupts disabled and apic_ioapic_lock held
 * 'p' is polymorphic -- if this function is called to process a deferred
 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
 * the irq pointer is retrieved. If not doing deferred reprogramming,
 * p is of the type 'apic_irq_t *'.
 *
 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
 * taken offline after a cpu is selected, but before apic_rebind is called to
 * bind interrupts to it.
 */
int
apic_setup_io_intr(void *p, int irq, boolean_t deferred)
{
	apic_irq_t *irqptr;
	struct ioapic_reprogram_data *drep = NULL;
	int rv;

	if (deferred) {
		drep = (struct ioapic_reprogram_data *)p;
		ASSERT(drep != NULL);
		irqptr = drep->irqp;
	} else {
		irqptr = (apic_irq_t *)p;
	}

	ASSERT(irqptr != NULL);

	rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, drep);
	if (rv) {
		/*
		 * CPU is not up or interrupts are disabled. Fall back to
		 * the first available CPU.
		 */
		rv = apic_rebind(irqptr, apic_find_cpu(APIC_CPU_INTR_ENABLE),
		    drep);
	}

	return (rv);
}

uchar_t
apic_modify_vector(uchar_t vector, int irq)
{
	apic_vector_to_irq[vector] = (uchar_t)irq;
	return (vector);
}

char *
apic_get_apic_type(void)
{
	return (apic_psm_info.p_mach_idstring);
}

void
x2apic_update_psm(void)
{
	struct psm_ops *pops = &apic_ops;

	ASSERT(pops != NULL);

	pops->psm_intr_exit = x2apic_intr_exit;
	pops->psm_setspl = x2apic_setspl;

	pops->psm_send_ipi = x2apic_send_ipi;
	send_dirintf = pops->psm_send_ipi;

	apic_mode = LOCAL_X2APIC;
}