/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Pluribus Networks, Inc.
 */
/*
 * apic_introp.c:
 *	Has code for Advanced DDI interrupt framework support.
 */
#include <sys/cpuvar.h>
#include <sys/psm.h>
#include <sys/archsystm.h>
#include <sys/apic.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>
#include <sys/apic_common.h>
extern struct av_head autovect[];
/*
 *	Local Function Prototypes
 */
apic_irq_t	*apic_find_irq(dev_info_t *, struct intrspec *, int);
/*
 * apic_pci_msi_enable_vector:
 *	Set the address/data fields in the MSI/X capability structure
 */
void
apic_pci_msi_enable_vector(apic_irq_t *irq_ptr, int type, int inum, int vector,
    int count, int target_apic_id)
{
	uint64_t		msi_addr, msi_data;
	ushort_t		msi_ctrl;
	dev_info_t		*dip = irq_ptr->airq_dip;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(dip);
	msi_regs_t		msi_regs;
	int			irqno, i;
	void			*intrmap_tbl[PCI_MSI_MAX_INTRS];

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
	    ddi_driver_name(dip), inum, vector, target_apic_id));

	ASSERT((handle != NULL) && (cap_ptr != 0));

	msi_regs.mr_data = vector;
	msi_regs.mr_addr = target_apic_id;

	for (i = 0; i < count; i++) {
		irqno = apic_vector_to_irq[vector + i];
		intrmap_tbl[i] = apic_irq_table[irqno]->airq_intrmap_private;
	}
	apic_vt_ops->apic_intrmap_alloc_entry(intrmap_tbl, dip, type,
	    count, 0xff);
	for (i = 0; i < count; i++) {
		irqno = apic_vector_to_irq[vector + i];
		apic_irq_table[irqno]->airq_intrmap_private =
		    intrmap_tbl[i];
	}

	apic_vt_ops->apic_intrmap_map_entry(irq_ptr->airq_intrmap_private,
	    (void *)&msi_regs, type, count);
	apic_vt_ops->apic_intrmap_record_msi(irq_ptr->airq_intrmap_private,
	    &msi_regs);

	/* MSI Address */
	msi_addr = msi_regs.mr_addr;

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = msi_regs.mr_data;
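
	/*
	 * Note: with interrupt remapping active, the intrmap ops above
	 * may have rewritten msi_regs so that msi_addr/msi_data refer
	 * to a remapping table entry rather than encoding the target
	 * APIC ID directly; with remapping disabled they pass through
	 * unchanged.
	 */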
	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

		/* Set the bits to inform how many MSIs are enabled */
		msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

		/*
		 * Only set vector if not on hypervisor
		 */
		pci_config_put32(handle,
		    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);

		if (msi_ctrl & PCI_MSI_64BIT_MASK) {
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, msi_data);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, msi_data);
		}
	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(dip);

		ASSERT(msix_p != NULL);

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_UPPER_ADDR_OFFSET),
		    msi_addr >> 32);
	}
}
/*
 * This function returns the no. of vectors available for the pri.
 * dip is not used at this moment.  If we really don't need that,
 * it will be removed.
 */
/*ARGSUSED*/
int
apic_navail_vector(dev_info_t *dip, int pri)
{
	int	lowest, highest, i, navail, count;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
	    (void *)dip, pri));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = count = 0;

	if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
		lowest -= APIC_VECTOR_PER_IPL;

	/* It has to be contiguous */
	for (i = lowest; i <= highest; i++) {
		count = 0;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i <= highest)) {
			if (APIC_CHECK_RESERVE_VECTORS(i))
				break;
			count++;
			i++;
		}
		if (count > navail)
			navail = count;
	}
	return (navail);
}
/*
 * Finds "count" contiguous MSI vectors starting at the proper alignment
 * at "pri".
 * Caller needs to make sure that count is a power of 2 and not < 1.
 */
int
apic_find_multi_vectors(int pri, int count)
{
	int	lowest, highest, i, navail, start, msibits;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
	    pri, count));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = 0;

	if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
		lowest -= APIC_VECTOR_PER_IPL;

	/*
	 * msibits is the no. of lower order message data bits for the
	 * allocated MSI vectors and is used to calculate the aligned
	 * starting vector
	 */
	msibits = count - 1;
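
	/*
	 * Example: for count = 4, msibits = 0x3, so a candidate start
	 * vector of 0x31 is rounded up to 0x34 below, leaving the low
	 * log2(count) bits of the message data zero as multi-message
	 * MSI requires.
	 */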
	/* It has to be contiguous */
	for (i = lowest; i <= highest; i++) {
		navail = 0;

		/*
		 * starting vector has to be aligned accordingly for
		 * multiple MSIs
		 */
		if (msibits)
			i = (i + msibits) & ~msibits;
		start = i;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i <= highest)) {
			if (APIC_CHECK_RESERVE_VECTORS(i))
				break;
			navail++;
			if (navail >= count)
				return (start);
			i++;
		}
	}
	return (0);
}
/*
 * Finds the apic_irq_t associated with the dip, ispec and type.
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
	apic_irq_t	*irqp;
	int		i;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
	    "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
	    ispec->intrspec_pri, type));

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		for (irqp = apic_irq_table[i]; irqp; irqp = irqp->airq_next) {
			if ((irqp->airq_dip == dip) &&
			    (irqp->airq_origirq == ispec->intrspec_vec) &&
			    (irqp->airq_ipl == ispec->intrspec_pri)) {
				if (type == DDI_INTR_TYPE_MSI) {
					if (irqp->airq_mps_intr_index ==
					    MSI_INDEX)
						return (irqp);
				} else if (type == DDI_INTR_TYPE_MSIX) {
					if (irqp->airq_mps_intr_index ==
					    MSIX_INDEX)
						return (irqp);
				} else
					return (irqp);
			}
		}
	}
	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
	return (NULL);
}
/*
 * This function will return the pending bit of the irqp.
 * It either comes from the IRR register of the APIC or the RDT
 * entry of the I/O APIC.
 * For the IRR check to work, we need to be on its binding CPU.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
	int	bit, index, irr, pending;
	int	intin_no;
	int	apic_ix;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
	    "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
	    type));

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);
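
	/*
	 * The IRR is 256 bits wide, presented as eight 32-bit words;
	 * index selects the word for this vector and bit its position
	 * within that word.
	 */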
	index = irqp->airq_vector / 32;
	bit = irqp->airq_vector % 32;
	irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
		/* check I/O APIC for fixed interrupt */
		intin_no = irqp->airq_intin_no;
		apic_ix = irqp->airq_ioapicindex;
		pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix,
		    intin_no) & AV_PENDING) ? 1 : 0;
	}
	return (pending);
}
/*
 * This function will clear the mask for the interrupt on the I/O APIC
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
	int	intin_no;
	ulong_t	iflag;
	int32_t	rdt_entry;
	int	apic_ix;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
	    (void *)irqp));

	intin_no = irqp->airq_intin_no;
	apic_ix = irqp->airq_ioapicindex;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);

	/* clear mask */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
	    ((~AV_MASK) & rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}
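
/*
 * Note that both apic_clear_mask() above and apic_set_mask() below
 * disable interrupts while apic_ioapic_lock is held, since the lock may
 * also be taken from interrupt context elsewhere in the PSM.
 */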
/*
 * This function will mask the interrupt on the I/O APIC
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
	int	intin_no;
	ulong_t	iflag;
	int32_t	rdt_entry;
	int	apic_ix;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

	intin_no = irqp->airq_intin_no;
	apic_ix = irqp->airq_ioapicindex;

	iflag = intr_clear();

	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);

	/* mask it */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
	    (AV_MASK | rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}
void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int		i;
	apic_irq_t	*irqptr;
	struct intrspec	ispec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
	    "count: %x pri: %x type: %x\n",
	    (void *)dip, inum, count, pri, type));

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return;

	for (i = 0; i < count; i++) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
		    "pri=0x%x count=0x%x\n", inum, pri, count));
		ispec.intrspec_vec = inum + i;
		ispec.intrspec_pri = pri;
		if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
			DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
			    "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
			    "failed\n", (void *)dip, inum, pri));
			continue;
		}
		irqptr->airq_mps_intr_index = FREE_INDEX;
		apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
	}
}
/*
 * apic_pci_msi_enable_mode:
 */
void
apic_pci_msi_enable_mode(dev_info_t *rdip, int type, int inum)
{
	ushort_t		msi_ctrl;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(rdip);

	ASSERT((handle != NULL) && (cap_ptr != 0));

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
			return;

		msi_ctrl |= PCI_MSI_ENABLE_BIT;
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		uint32_t	mask;
		ddi_intr_msix_t	*msix_p;

		msix_p = i_ddi_get_msix(rdip);

		ASSERT(msix_p != NULL);

		/* Offset into "inum"th entry in the MSI-X table & clear mask */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;

		mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);
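
		/*
		 * Bit 0 of the per-vector control word is the mask
		 * bit; clearing it unmasks this MSI-X table entry.
		 */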
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask & ~1));

		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

		if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
			msi_ctrl |= PCI_MSIX_ENABLE_BIT;
			pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
			    msi_ctrl);
		}
	}
}
static int
apic_set_cpu(int irqno, int cpu, int *result)
{
	apic_irq_t	*irqp;
	ulong_t		iflag;
	int		ret;

	DDI_INTR_IMPLDBG((CE_CONT, "APIC_SET_CPU\n"));

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];
	mutex_exit(&airq_mutex);

	if (irqp == NULL) {
		*result = ENXIO;
		return (PSM_FAILURE);
	}

	/* Fail if this is an MSI intr and is part of a group. */
	if ((irqp->airq_mps_intr_index == MSI_INDEX) &&
	    (irqp->airq_intin_no > 1)) {
		*result = ENXIO;
		return (PSM_FAILURE);
	}

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	ret = apic_rebind_all(irqp, cpu);

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	if (ret) {
		*result = EIO;
		return (PSM_FAILURE);
	}
	/*
	 * keep tracking the default interrupt cpu binding
	 */
	irqp->airq_cpu = cpu;

	*result = 0;
	return (PSM_SUCCESS);
}
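
/*
 * A multi-message MSI group shares one address register, so its members
 * cannot be retargeted individually; apic_set_cpu() above therefore
 * rejects such vectors and apic_grp_set_cpu() below moves the whole
 * group.
 */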
static int
apic_grp_set_cpu(int irqno, int new_cpu, int *result)
{
	dev_info_t		*orig_dip;
	uint32_t		orig_cpu;
	ulong_t			iflag;
	apic_irq_t		*irqps[PCI_MSI_MAX_INTRS];
	int			i;
	int			cap_ptr;
	int			msi_mask_off = 0;
	ushort_t		msi_ctrl;
	uint32_t		msi_pvm = 0;
	ddi_acc_handle_t	handle;
	int			num_vectors = 0;
	uint32_t		vector;

	DDI_INTR_IMPLDBG((CE_CONT, "APIC_GRP_SET_CPU\n"));

	/*
	 * Take mutex to insure that table doesn't change out from underneath
	 * us while we're playing with it.
	 */
	mutex_enter(&airq_mutex);
	irqps[0] = apic_irq_table[irqno];
	orig_cpu = irqps[0]->airq_temp_cpu;
	orig_dip = irqps[0]->airq_dip;
	num_vectors = irqps[0]->airq_intin_no;
	vector = irqps[0]->airq_vector;

	/* A "group" of 1 */
	if (num_vectors == 1) {
		mutex_exit(&airq_mutex);
		return (apic_set_cpu(irqno, new_cpu, result));
	}

	*result = ENXIO;

	if (irqps[0]->airq_mps_intr_index != MSI_INDEX) {
		mutex_exit(&airq_mutex);
		DDI_INTR_IMPLDBG((CE_CONT, "set_grp: intr not MSI\n"));
		goto set_grp_intr_done;
	}
	if ((num_vectors < 1) || ((num_vectors - 1) & vector)) {
		mutex_exit(&airq_mutex);
		DDI_INTR_IMPLDBG((CE_CONT,
		    "set_grp: base vec not part of a grp or not aligned: "
		    "vec:0x%x, num_vec:0x%x\n", vector, num_vectors));
		goto set_grp_intr_done;
	}
	DDI_INTR_IMPLDBG((CE_CONT, "set_grp: num intrs in grp: %d\n",
	    num_vectors));

	ASSERT((num_vectors + vector) < APIC_MAX_VECTOR);

	*result = EIO;

	/*
	 * All IRQ entries in the table for the given device will be not
	 * shared.  Since they are not shared, the dip in the table will
	 * be true to the device of interest.
	 */
	for (i = 1; i < num_vectors; i++) {
		irqps[i] = apic_irq_table[apic_vector_to_irq[vector + i]];
		if (irqps[i] == NULL) {
			mutex_exit(&airq_mutex);
			goto set_grp_intr_done;
		}
#ifdef DEBUG
		/* Sanity check: CPU and dip is the same for all entries. */
		if ((irqps[i]->airq_dip != orig_dip) ||
		    (irqps[i]->airq_temp_cpu != orig_cpu)) {
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT,
			    "set_grp: cpu or dip for vec 0x%x difft than for "
			    "vec 0x%x\n", vector, vector + i));
			DDI_INTR_IMPLDBG((CE_CONT,
			    "  cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
			    irqps[i]->airq_temp_cpu, (void *)orig_dip,
			    (void *)irqps[i]->airq_dip));
			goto set_grp_intr_done;
		}
#endif /* DEBUG */
	}
	mutex_exit(&airq_mutex);

	cap_ptr = i_ddi_get_msi_msix_cap_ptr(orig_dip);
	handle = i_ddi_get_pci_config_handle(orig_dip);
	msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

	/* MSI Per vector masking is supported. */
	if (msi_ctrl & PCI_MSI_PVM_MASK) {
		if (msi_ctrl & PCI_MSI_64BIT_MASK)
			msi_mask_off = cap_ptr + PCI_MSI_64BIT_MASKBITS;
		else
			msi_mask_off = cap_ptr + PCI_MSI_32BIT_MASK;
		msi_pvm = pci_config_get32(handle, msi_mask_off);
		pci_config_put32(handle, msi_mask_off, (uint32_t)-1);
		DDI_INTR_IMPLDBG((CE_CONT,
		    "set_grp: pvm supported.  Mask set to 0x%x\n",
		    pci_config_get32(handle, msi_mask_off)));
	}
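
	/*
	 * With every vector in the group masked above, no message can
	 * be delivered while the shared address register is rewritten
	 * by the rebinds below.
	 */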
	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	/*
	 * Do the first rebind and check for errors.  Apic_rebind_all
	 * returns an error if the CPU is not accepting interrupts.  If
	 * the first one succeeds they all will.
	 */
	if (apic_rebind_all(irqps[0], new_cpu))
		(void) apic_rebind_all(irqps[0], orig_cpu);
	else {
		irqps[0]->airq_cpu = new_cpu;

		for (i = 1; i < num_vectors; i++) {
			(void) apic_rebind_all(irqps[i], new_cpu);
			irqps[i]->airq_cpu = new_cpu;
		}
		*result = 0;	/* SUCCESS */
	}

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	/* Reenable vectors if per vector masking is supported. */
	if (msi_ctrl & PCI_MSI_PVM_MASK) {
		pci_config_put32(handle, msi_mask_off, msi_pvm);
		DDI_INTR_IMPLDBG((CE_CONT,
		    "set_grp: pvm supported.  Mask restored to 0x%x\n",
		    pci_config_get32(handle, msi_mask_off)));
	}

set_grp_intr_done:
	if (*result != 0)
		return (PSM_FAILURE);

	return (PSM_SUCCESS);
}
int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
	struct autovec	*av_dev;
	uchar_t		irqno;
	int		i;
	apic_irq_t	*irq_p;

	/* Sanity check the vector/irq argument. */
	ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

	mutex_enter(&airq_mutex);

	/*
	 * Convert the vecirq arg to an irq using vector_to_irq table
	 * if the arg is a vector.  Pass thru if already an irq.
	 */
	if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
	    PSMGI_INTRBY_VEC)
		irqno = apic_vector_to_irq[vecirq];
	else
		irqno = vecirq;

	irq_p = apic_irq_table[irqno];

	if ((irq_p == NULL) ||
	    ((irq_p->airq_mps_intr_index != RESERVE_INDEX) &&
	    ((irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
	    (irq_p->airq_temp_cpu == IRQ_UNINIT)))) {
		mutex_exit(&airq_mutex);
		return (PSM_FAILURE);
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

		/* Get the (temp) cpu from apic_irq table, indexed by irq. */
		intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR)
		intr_params_p->avgi_vector = irq_p->airq_vector;

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS))
		/* Get number of devices from apic_irq table shared field. */
		intr_params_p->avgi_num_devs = irq_p->airq_share;

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip.  Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			intr_params_p->avgi_num_devs =
			    MIN(intr_params_p->avgi_num_devs, i);
		}
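
		/*
		 * airq_share counts all sharers of the irq, including
		 * autovect entries with a NULL dip, so num_devs is
		 * clamped above to the number of entries that can
		 * actually be returned.
		 */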
		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0)
			intr_params_p->avgi_dip_list = NULL;

		else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_SLEEP);

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips.  These will be
			 * ignored.
			 */
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
		}
	}

	mutex_exit(&airq_mutex);

	return (PSM_SUCCESS);
}
/*
 * This function provides external interface to the nexus for all
 * functionalities related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
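
/*
 * Illustrative (hypothetical) caller: a nexus driver typically reaches
 * this entry point through the PSM ops vector rather than directly,
 * e.g.
 *
 *	int rval;
 *
 *	if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_NAVAIL_VECTORS,
 *	    &rval) == PSM_SUCCESS)
 *		navail = rval;
 *
 * The rdip/hdlp/navail names above are placeholders, not code in this
 * file.
 */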
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int		cap;
	int		count_vec;
	int		old_priority;
	int		new_priority;
	int		new_cpu;
	apic_irq_t	*irqp;
	struct intrspec	*ispec, intr_spec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
	    "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level and
		 * mask off the MSI/X bits in hdlp->ih_type if not
		 * supported before return.  If MSI/X is supported,
		 * leave the ih_type unchanged and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1) {
			if (apic_msix_enable)
				*result = hdlp->ih_type;
			else
				*result = hdlp->ih_type & ~DDI_INTR_TYPE_MSIX;
		} else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
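		/*
		 * apic_support_msi is effectively tri-state: 0 means
		 * not yet probed, 1 means MSI is supported, and -1
		 * means the probe failed, so later CHECK_MSI calls
		 * skip apic_check_msi_support().
		 */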
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
			*result = apic_alloc_msi_vectors(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1, hdlp->ih_pri,
			    (int)(uintptr_t)hdlp->ih_scratch2);
		else
			*result = apic_alloc_msix_vectors(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1, hdlp->ih_pri,
			    (int)(uintptr_t)hdlp->ih_scratch2);
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		*result = apic_navail_vector(dip, hdlp->ih_pri);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
		if (*result == -1)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_GET_PENDING:
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = apic_get_pending(irqp, hdlp->ih_type);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
		if (irqp == NULL)
			return (PSM_FAILURE);
		apic_clear_mask(irqp);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		apic_set_mask(irqp);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = (irqp->airq_share > 1) ? 1 : 0;
		break;
	case PSM_INTR_OP_SET_PRI:
		old_priority = hdlp->ih_pri;	/* save old value */
		new_priority = *(int *)result;	/* try the new value */

		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			return (PSM_SUCCESS);
		}

		/* Now allocate the vectors */
		if (hdlp->ih_type == DDI_INTR_TYPE_MSI) {
			/* SET_PRI does not support the case of multiple MSI */
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) > 1)
				return (PSM_FAILURE);

			count_vec = apic_alloc_msi_vectors(dip, hdlp->ih_inum,
			    1, new_priority,
			    DDI_INTR_ALLOC_STRICT);
		} else {
			count_vec = apic_alloc_msix_vectors(dip, hdlp->ih_inum,
			    1, new_priority,
			    DDI_INTR_ALLOC_STRICT);
		}

		/* Did we get new vectors? */
		if (!count_vec)
			return (PSM_FAILURE);

		/* Finally, free the previously allocated vectors */
		apic_free_vectors(dip, hdlp->ih_inum, count_vec,
		    old_priority, hdlp->ih_type);
		break;
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GRP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		new_cpu = (int)(intptr_t)hdlp->ih_private;
		if (!apic_cpu_in_range(new_cpu)) {
			DDI_INTR_IMPLDBG((CE_CONT,
			    "[grp_]set_cpu: cpu out of range: %d\n", new_cpu));
			*result = EINVAL;
			return (PSM_FAILURE);
		}
		if (hdlp->ih_vector > APIC_MAX_VECTOR) {
			DDI_INTR_IMPLDBG((CE_CONT,
			    "[grp_]set_cpu: vector out of range: %d\n",
			    hdlp->ih_vector));
			*result = EINVAL;
			return (PSM_FAILURE);
		}
		if ((hdlp->ih_flags & PSMGI_INTRBY_FLAGS) == PSMGI_INTRBY_VEC)
			hdlp->ih_vector = apic_vector_to_irq[hdlp->ih_vector];
		if (intr_op == PSM_INTR_OP_SET_CPU) {
			if (apic_set_cpu(hdlp->ih_vector, new_cpu, result) !=
			    PSM_SUCCESS)
				return (PSM_FAILURE);
		} else {
			if (apic_grp_set_cpu(hdlp->ih_vector, new_cpu,
			    result) != PSM_SUCCESS)
				return (PSM_FAILURE);
		}
		break;
	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
		 */
		if (apic_get_vector_intr_info(
		    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_APIC_TYPE:
		((apic_get_type_t *)(hdlp->ih_private))->avgi_type =
		    apic_get_apic_type();
		((apic_get_type_t *)(hdlp->ih_private))->avgi_num_intr =
		    APIC_MAX_VECTOR;
		((apic_get_type_t *)(hdlp->ih_private))->avgi_num_cpu =
		    boot_ncpus;
		hdlp->ih_ver = apic_get_apic_version();
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}