/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */

#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
int (*ioapic_renumber_irq)(int ioapic, int irq);
atomic_t irq_mis_count;

/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
DEFINE_SPINLOCK(vector_lock);

int timer_through_8259 __initdata;
/*
 *	Is the SiS APIC rmw bug present ?
 *	-1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];
/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

static int disable_timer_pin_1 __initdata;
/*
 * Rough estimation of how many shared IRQs there are, can
 * be changed anytime.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
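/*
 * Note: PIN_MAP_SIZE reserves NR_IRQS base slots (one per IRQ for the
 * common 1:1 pin<->IRQ case) plus NR_IRQS overflow slots, which
 * add_pin_to_irq() below hands out when several pins share one IRQ.
 */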
/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

static struct irq_pin_list {
	int apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
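/*
 * The I/O APIC is programmed through a two-register window: writing a
 * register number to 'index' selects which internal register the next
 * access to 'data' hits.  Reading redirection entry N therefore takes
 * two select/read pairs (registers 0x10 + 2*N and 0x11 + 2*N), which
 * is why callers hold ioapic_lock around paired accesses.
 */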
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	volatile struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
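/*
 * Taken together, the two write paths above keep a routing entry from
 * ever being live while half-written: writes that may unmask go high
 * word first (vector/trigger bits land before the mask clears), while
 * masking goes low word first (the mask bit sets before anything else
 * changes).
 */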
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	static int first_free_entry = NR_IRQS;
	struct irq_pin_list *entry = irq_2_pin + irq;

	while (entry->next)
		entry = irq_2_pin + entry->next;

	if (entry->pin != -1) {
		entry->next = first_free_entry;
		entry = irq_2_pin + entry->next;
		if (++first_free_entry >= PIN_MAP_SIZE)
			panic("io_apic.c: whoops");
	}
	entry->apic = apic;
	entry->pin = pin;
}
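/*
 * Example of the resulting layout: if ISA IRQ 9 is routed to pin 9 of
 * apic 0 and also to pin 12 of apic 1, irq_2_pin[9] holds <0,9> and its
 * ->next indexes an overflow slot (>= NR_IRQS) holding <1,12>.  Lookup
 * stays O(1) for the unshared case and a short chain walk otherwise.
 */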
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	while (1) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}
static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
{
	struct irq_pin_list *entry = irq_2_pin + irq;
	unsigned int pin, reg;

	for (;;) {
		pin = entry->pin;
		if (pin == -1)
			break;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		reg &= ~disable;
		reg |= enable;
		io_apic_modify(entry->apic, 0x10 + pin*2, reg);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}
/* mask = 1 */
static void __mask_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
}

/* mask = 0 */
static void __unmask_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
}

/* mask = 1, trigger = 0 */
static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
				IO_APIC_REDIR_LEVEL_TRIGGER);
}

/* mask = 0, trigger = 1 */
static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
				IO_APIC_REDIR_MASKED);
}
static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}
static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_SMP
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
{
	unsigned long flags;
	int pin;
	struct irq_pin_list *entry = irq_2_pin + irq;
	unsigned int apicid_value;
	cpumask_t tmp;

	cpus_and(tmp, cpumask, cpu_online_map);
	if (cpus_empty(tmp))
		tmp = TARGET_CPUS;

	cpus_and(cpumask, tmp, CPU_MASK_ALL);

	apicid_value = cpu_mask_to_apicid(cpumask);
	/* Prepare to do the io_apic_write */
	apicid_value = apicid_value << 24;
	spin_lock_irqsave(&ioapic_lock, flags);
	for (;;) {
		pin = entry->pin;
		if (pin == -1)
			break;
		io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
	irq_desc[irq].affinity = cpumask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
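/*
 * Note that only the high dword of each redirection entry (register
 * 0x10 + 1 + pin*2) is rewritten here: the destination APIC ID lives
 * in bits 56-63 of the entry, so retargeting an IRQ does not disturb
 * the vector, trigger or mask bits in the low dword.
 */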
#if defined(CONFIG_IRQBALANCE)
# include <asm/processor.h>	/* kernel_thread() */
# include <linux/kernel_stat.h>	/* kstat */
# include <linux/slab.h>	/* kmalloc() */
# include <linux/timer.h>

#define IRQBALANCE_CHECK_ARCH -999
#define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
#define MIN_BALANCED_IRQ_INTERVAL	(HZ/2)
#define BALANCED_IRQ_MORE_DELTA		(HZ/10)
#define BALANCED_IRQ_LESS_DELTA		(HZ)

static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
static int physical_balance __read_mostly;
static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;

static struct irq_cpu_info {
	unsigned long *last_irq;
	unsigned long *irq_delta;
	unsigned long irq;
} irq_cpu_data[NR_CPUS];

#define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
#define LAST_CPU_IRQ(cpu, irq)	(irq_cpu_data[cpu].last_irq[irq])
#define IRQ_DELTA(cpu, irq)	(irq_cpu_data[cpu].irq_delta[irq])

#define IDLE_ENOUGH(cpu,now) \
	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))

#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)

#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))

static cpumask_t balance_irq_affinity[NR_IRQS] = {
	[0 ... NR_IRQS-1] = CPU_MASK_ALL
};

void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
{
	balance_irq_affinity[irq] = mask;
}
static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
			unsigned long now, int direction)
{
	int search_idle = 1;
	int cpu = curr_cpu;

	goto inside;

	do {
		if (unlikely(cpu == curr_cpu))
			search_idle = 0;
inside:
		if (direction == 1) {
			cpu++;
			if (cpu >= NR_CPUS)
				cpu = 0;
		} else {
			cpu--;
			if (cpu == -1)
				cpu = NR_CPUS-1;
		}
	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
			(search_idle && !IDLE_ENOUGH(cpu, now)));

	return cpu;
}
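/*
 * move() scans the CPU numbers in a circle starting next to curr_cpu,
 * first insisting on an idle-enough CPU; once the scan wraps back to
 * curr_cpu, search_idle is dropped and any online CPU permitted by
 * allowed_mask will do.
 */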
static inline void balance_irq(int cpu, int irq)
{
	unsigned long now = jiffies;
	cpumask_t allowed_mask;
	unsigned int new_cpu;

	if (irqbalance_disabled)
		return;

	cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
	new_cpu = move(cpu, allowed_mask, now, 1);
	if (cpu != new_cpu)
		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
}
static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
{
	int i, j;

	for_each_online_cpu(i) {
		for (j = 0; j < NR_IRQS; j++) {
			if (!irq_desc[j].action)
				continue;
			/* Is it a significant load ?  */
			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
						useful_load_threshold)
				continue;
			balance_irq(i, j);
		}
	}
	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
	return;
}
static void do_irq_balance(void)
{
	int i, j;
	unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
	unsigned long move_this_load = 0;
	int max_loaded = 0, min_loaded = 0;
	int load;
	unsigned long useful_load_threshold = balanced_irq_interval + 10;
	int selected_irq;
	int tmp_loaded, first_attempt = 1;
	unsigned long tmp_cpu_irq;
	unsigned long imbalance = 0;
	cpumask_t allowed_mask, target_cpu_mask, tmp;

	for_each_possible_cpu(i) {
		int package_index;
		CPU_IRQ(i) = 0;
		if (!cpu_online(i))
			continue;
		package_index = CPU_TO_PACKAGEINDEX(i);
		for (j = 0; j < NR_IRQS; j++) {
			unsigned long value_now, delta;
			/* Is this an active IRQ or balancing disabled ? */
			if (!irq_desc[j].action || irq_balancing_disabled(j))
				continue;
			if (package_index == i)
				IRQ_DELTA(package_index, j) = 0;
			/* Determine the total count per processor per IRQ */
			value_now = (unsigned long) kstat_cpu(i).irqs[j];

			/* Determine the activity per processor per IRQ */
			delta = value_now - LAST_CPU_IRQ(i, j);

			/* Update last_cpu_irq[][] for the next time */
			LAST_CPU_IRQ(i, j) = value_now;

			/* Ignore IRQs whose rate is less than the clock */
			if (delta < useful_load_threshold)
				continue;
			/* update the load for the processor or package total */
			IRQ_DELTA(package_index, j) += delta;

			/* Keep track of the higher numbered sibling as well */
			if (i != package_index)
				CPU_IRQ(i) += delta;
			/*
			 * We have sibling A and sibling B in the package
			 *
			 * cpu_irq[A] = load for cpu A + load for cpu B
			 * cpu_irq[B] = load for cpu B
			 */
			CPU_IRQ(package_index) += delta;
		}
	}
	/* Find the least loaded processor package */
	for_each_online_cpu(i) {
		if (i != CPU_TO_PACKAGEINDEX(i))
			continue;
		if (min_cpu_irq > CPU_IRQ(i)) {
			min_cpu_irq = CPU_IRQ(i);
			min_loaded = i;
		}
	}
	max_cpu_irq = ULONG_MAX;

tryanothercpu:
	/*
	 * Look for heaviest loaded processor.
	 * We may come back to get the next heaviest loaded processor.
	 * Skip processors with trivial loads.
	 */
	tmp_cpu_irq = 0;
	tmp_loaded = -1;
	for_each_online_cpu(i) {
		if (i != CPU_TO_PACKAGEINDEX(i))
			continue;
		if (max_cpu_irq <= CPU_IRQ(i))
			continue;
		if (tmp_cpu_irq < CPU_IRQ(i)) {
			tmp_cpu_irq = CPU_IRQ(i);
			tmp_loaded = i;
		}
	}

	if (tmp_loaded == -1) {
		/*
		 * In the case of small number of heavy interrupt sources,
		 * loading some of the cpus too much. We use Ingo's original
		 * approach to rotate them around.
		 */
		if (!first_attempt && imbalance >= useful_load_threshold) {
			rotate_irqs_among_cpus(useful_load_threshold);
			return;
		}
		goto not_worth_the_effort;
	}

	first_attempt = 0;		/* heaviest search */
	max_cpu_irq = tmp_cpu_irq;	/* load */
	max_loaded = tmp_loaded;	/* processor */
	imbalance = (max_cpu_irq - min_cpu_irq) / 2;

	/*
	 * if imbalance is less than approx 10% of max load, then
	 * observe diminishing returns action. - quit
	 */
	if (imbalance < (max_cpu_irq >> 3))
		goto not_worth_the_effort;

tryanotherirq:
	/* if we select an IRQ to move that can't go where we want, then
	 * see if there is another one to try.
	 */
	move_this_load = 0;
	selected_irq = -1;
	for (j = 0; j < NR_IRQS; j++) {
		/* Is this an active IRQ? */
		if (!irq_desc[j].action)
			continue;
		if (imbalance <= IRQ_DELTA(max_loaded, j))
			continue;
		/* Try to find the IRQ that is closest to the imbalance
		 * without going over.
		 */
		if (move_this_load < IRQ_DELTA(max_loaded, j)) {
			move_this_load = IRQ_DELTA(max_loaded, j);
			selected_irq = j;
		}
	}
	if (selected_irq == -1)
		goto tryanothercpu;

	imbalance = move_this_load;

	/* For physical_balance case, we accumulated both load
	 * values in the one of the siblings cpu_irq[],
	 * to use the same code for physical and logical processors
	 * as much as possible.
	 *
	 * NOTE: the cpu_irq[] array holds the sum of the load for
	 * sibling A and sibling B in the slot for the lowest numbered
	 * sibling (A), _AND_ the load for sibling B in the slot for
	 * the higher numbered sibling.
	 *
	 * We seek the least loaded sibling by making the comparison
	 * (A+B)/2 vs B
	 */
	load = CPU_IRQ(min_loaded) >> 1;
	for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
		if (load > CPU_IRQ(j)) {
			/* This won't change cpu_sibling_map[min_loaded] */
			load = CPU_IRQ(j);
			min_loaded = j;
		}
	}

	cpus_and(allowed_mask,
		cpu_online_map,
		balance_irq_affinity[selected_irq]);
	target_cpu_mask = cpumask_of_cpu(min_loaded);
	cpus_and(tmp, target_cpu_mask, allowed_mask);

	if (!cpus_empty(tmp)) {
		/* mark for change destination */
		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));

		/* Since we made a change, come back sooner to
		 * check for more variation.
		 */
		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
		return;
	}
	goto tryanotherirq;

not_worth_the_effort:
	/*
	 * if we did not find an IRQ to move, then adjust the time interval
	 * upward
	 */
	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
	return;
}
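/*
 * The rebalance period is adaptive: every successful migration pulls
 * balanced_irq_interval down by BALANCED_IRQ_LESS_DELTA (toward HZ/2),
 * while a pass that finds nothing worth moving backs it off by
 * BALANCED_IRQ_MORE_DELTA (up to 5*HZ), so an idle system is scanned
 * ever less often.
 */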
static int balanced_irq(void *unused)
{
	int i;
	unsigned long prev_balance_time = jiffies;
	long time_remaining = balanced_irq_interval;

	/* push everything to CPU 0 to give us a starting point.  */
	for (i = 0 ; i < NR_IRQS ; i++) {
		irq_desc[i].pending_mask = cpumask_of_cpu(0);
		set_pending_irq(i, cpumask_of_cpu(0));
	}

	set_freezable();
	for ( ; ; ) {
		time_remaining = schedule_timeout_interruptible(time_remaining);
		try_to_freeze();
		if (time_after(jiffies,
				prev_balance_time+balanced_irq_interval)) {
			preempt_disable();
			do_irq_balance();
			prev_balance_time = jiffies;
			time_remaining = balanced_irq_interval;
			preempt_enable();
		}
	}
	return 0;
}
static int __init balanced_irq_init(void)
{
	int i;
	struct cpuinfo_x86 *c;
	cpumask_t tmp;

	cpus_shift_right(tmp, cpu_online_map, 2);
	c = &boot_cpu_data;
	/* When not overwritten by the command line ask subarchitecture. */
	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
		irqbalance_disabled = NO_BALANCE_IRQ;
	if (irqbalance_disabled)
		return 0;

	/* disable irqbalance completely if there is only one processor online */
	if (num_online_cpus() < 2) {
		irqbalance_disabled = 1;
		return 0;
	}
	/*
	 * Enable physical balance only if more than 1 physical processor
	 * is present
	 */
	if (smp_num_siblings > 1 && !cpus_empty(tmp))
		physical_balance = 1;

	for_each_online_cpu(i) {
		irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
		irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
			printk(KERN_ERR "balanced_irq_init: out of memory");
			goto failed;
		}
	}

	printk(KERN_INFO "Starting balanced_irq\n");
	if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
		return 0;
	printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
failed:
	for_each_possible_cpu(i) {
		kfree(irq_cpu_data[i].irq_delta);
		irq_cpu_data[i].irq_delta = NULL;
		kfree(irq_cpu_data[i].last_irq);
		irq_cpu_data[i].last_irq = NULL;
	}
	return 0;
}

int __devinit irqbalance_disable(char *str)
{
	irqbalance_disabled = 1;
	return 1;
}

__setup("noirqbalance", irqbalance_disable);

late_initcall(balanced_irq_init);
#endif /* CONFIG_IRQBALANCE */
#endif /* CONFIG_SMP */
#ifndef CONFIG_SMP
void send_IPI_self(int vector)
{
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
#endif /* !CONFIG_SMP */
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS];
static int pirqs_enabled;
int skip_ioapic_setup;
static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;

	pirqs_enabled = 1;
	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
		"slot:%d, pin:%d.\n", bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
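/*
 * Worked example of the mp_srcbusirq encoding used above for PCI
 * buses: bits 1:0 carry the interrupt pin (0=INTA..3=INTD) and bits
 * 6:2 the device/slot number, so srcbusirq 0x49 (binary 1001001)
 * describes slot 18, pin INTB.
 */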
/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come
 * online, so the mask in all cases should simply be TARGET_CPUS
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);
			set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < 16) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3) {
	case 0: /* conforms, ie. bus-type dependent polarity */
	{
		polarity = test_bit(bus, mp_bus_not_pci)?
			default_ISA_polarity(idx):
			default_PCI_polarity(idx);
		break;
	}
	case 1: /* high active */
	{
		polarity = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	case 3: /* low active */
	{
		polarity = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	}
	return polarity;
}
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3) {
	case 0: /* conforms, ie. bus-type dependent */
	{
		trigger = test_bit(bus, mp_bus_not_pci)?
				default_ISA_trigger(idx):
				default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_EISA: /* EISA pin */
		{
			trigger = default_EISA_trigger(idx);
			break;
		}
		case MP_BUS_PCI: /* PCI pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_MCA: /* MCA pin */
		{
			trigger = default_MCA_trigger(idx);
			break;
		}
		default:
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		}
#endif
		break;
	}
	case 1: /* edge */
	{
		trigger = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 1;
		break;
	}
	case 3: /* level */
	{
		trigger = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 0;
		break;
	}
	}
	return trigger;
}
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci))
		irq = mp_irqs[idx].mp_srcbusirq;
	else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;

		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
	return irq;
}
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };

static int __assign_irq_vector(int irq)
{
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
	int vector, offset;

	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);

	if (irq_vector[irq] > 0)
		return irq_vector[irq];

	vector = current_vector;
	offset = current_offset;
next:
	vector += 8;
	if (vector >= first_system_vector) {
		offset = (offset + 1) % 8;
		vector = FIRST_DEVICE_VECTOR + offset;
	}
	if (vector == current_vector)
		return -ENOSPC;
	if (test_and_set_bit(vector, used_vectors))
		goto next;

	current_vector = vector;
	current_offset = offset;
	irq_vector[irq] = vector;

	return vector;
}
static int assign_irq_vector(int irq)
{
	unsigned long flags;
	int vector;

	spin_lock_irqsave(&vector_lock, flags);
	vector = __assign_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);

	return vector;
}

static struct irq_chip ioapic_chip;

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_desc[irq].status |= IRQ_LEVEL;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					 handle_fasteoi_irq, "fasteoi");
	} else {
		irq_desc[irq].status &= ~IRQ_LEVEL;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					 handle_edge_irq, "edge");
	}
	set_intr_gate(vector, interrupt[irq]);
}
static void __init setup_IO_APIC_irqs(void)
{
	struct IO_APIC_route_entry entry;
	int apic, pin, idx, irq, first_notcon = 1, vector;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

		/*
		 * add it to the IO-APIC irq-routing table:
		 */
		memset(&entry, 0, sizeof(entry));

		entry.delivery_mode = INT_DELIVERY_MODE;
		entry.dest_mode = INT_DEST_MODE;
		entry.mask = 0;				/* enable IRQ */
		entry.dest.logical.logical_dest =
					cpu_mask_to_apicid(TARGET_CPUS);

		idx = find_irq_entry(apic, pin, mp_INT);
		if (idx == -1) {
			if (first_notcon) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						" IO-APIC (apicid-pin) %d-%d",
						mp_ioapics[apic].mp_apicid,
						pin);
				first_notcon = 0;
			} else
				apic_printk(APIC_VERBOSE, ", %d-%d",
					mp_ioapics[apic].mp_apicid, pin);
			continue;
		}

		if (!first_notcon) {
			apic_printk(APIC_VERBOSE, " not connected.\n");
			first_notcon = 1;
		}

		entry.trigger = irq_trigger(idx);
		entry.polarity = irq_polarity(idx);

		if (irq_trigger(idx)) {
			entry.trigger = 1;
			entry.mask = 1;
		}

		irq = pin_2_irq(idx, apic, pin);
		/*
		 * skip adding the timer int on secondary nodes, which causes
		 * a small but painful rift in the time-space continuum
		 */
		if (multi_timer_check(apic, irq))
			continue;
		else
			add_pin_to_irq(irq, apic, pin);

		if (!apic && !IO_APIC_IRQ(irq))
			continue;

		if (IO_APIC_IRQ(irq)) {
			vector = assign_irq_vector(irq);
			entry.vector = vector;
			ioapic_register_intr(irq, vector, IOAPIC_AUTO);

			if (!apic && (irq < 16))
				disable_8259A_irq(irq);
		}
		ioapic_write_entry(apic, pin, entry);
	}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;					/* mask IRQ now */
	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	ioapic_register_intr(0, vector, IOAPIC_EDGE);

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}
void __init print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(apic, 3);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
			  " Stat Dest Deli Vect:   \n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		printk(KERN_DEBUG " %02x %03X %02X  ",
			i,
			entry.dest.logical.logical_dest,
			entry.dest.physical.physical_dest
		);

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_pin_list *entry = irq_2_pin + i;
		if (entry->pin < 0)
			continue;
		printk(KERN_DEBUG "IRQ%d ", i);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = irq_2_pin + entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
static void print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}
void /*__init*/ print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v,
			GET_APIC_ID(read_apic_id()));
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		v = apic_read(APIC_ARBPRI);
		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			v & APIC_ARBPRI_MASK);
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	v = apic_read(APIC_ICR);
	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
	v = apic_read(APIC_ICR2);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}
void print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}
void /*__init*/ print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
static void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int i, apic;
	unsigned long flags;

	for (i = 0; i < PIN_MAP_SIZE; i++) {
		irq_2_pin[i].pin = -1;
		irq_2_pin[i].next = 0;
	}
	if (!pirqs_enabled)
		for (i = 0; i < MAX_PIRQS; i++)
			pirq_entries[i] = -1;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);


			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see whether the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest.physical.physical_dest =
					GET_APIC_ID(read_apic_id());

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}
	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
static void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic;
	int i;
	unsigned char old_id;
	unsigned long flags;

#ifdef CONFIG_X86_NUMAQ
	if (found_numaq)
		return;
#endif

	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {

		/* Read the register 0 value */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic].mp_apicid;

		if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic, mp_ioapics[apic].mp_apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (check_apicid_used(phys_id_present_map,
					mp_ioapics[apic].mp_apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic, mp_ioapics[apic].mp_apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic].mp_apicid = i;
		} else {
			physid_mask_t tmp;
			tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic].mp_apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}


		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic].mp_apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].mp_dstapic == old_id)
					mp_irqs[i].mp_dstapic
						= mp_ioapics[apic].mp_apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic].mp_apicid);

		reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic, 0, reg_00.raw);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */
	if (time_after(jiffies, t1 + 4))
		return 1;

	return 0;
}
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 *
 * (We do this for level-triggered IRQs too - it cannot hurt.)
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
static void ack_ioapic_irq(unsigned int irq)
{
	move_native_irq(irq);
	ack_APIC_irq();
}
static void ack_ioapic_quirk_irq(unsigned int irq)
{
	unsigned long v;
	int i;

	move_native_irq(irq);
/*
 * It appears there is an erratum which affects at least version 0x11
 * of I/O APIC (that's the 82093AA and cores integrated into various
 * chipsets).  Under certain conditions a level-triggered interrupt is
 * erroneously delivered as an edge-triggered one but the respective IRR
 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
 * message but it will never arrive and further interrupts are blocked
 * from the source.  The exact reason is so far unknown, but the
 * phenomenon was observed when two consecutive interrupt requests
 * from a given source get delivered to the same CPU and the source is
 * temporarily disabled in between.
 *
 * A workaround is to simulate an EOI message manually.  We achieve it
 * by setting the trigger mode to edge and then to level when the edge
 * trigger mode gets detected in the TMR of a local APIC for a
 * level-triggered interrupt.  We mask the source for the time of the
 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 * The idea is from Manfred Spraul.  --macro
 */
	i = irq_vector[irq];

	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	ack_APIC_irq();

	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		spin_lock(&ioapic_lock);
		__mask_and_edge_IO_APIC_irq(irq);
		__unmask_and_level_IO_APIC_irq(irq);
		spin_unlock(&ioapic_lock);
	}
}
static int ioapic_retrigger_irq(unsigned int irq)
{
	send_IPI_self(irq_vector[irq]);

	return 1;
}
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_ioapic_irq,
	.eoi		= ack_ioapic_quirk_irq,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
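/*
 * Handler pairing: ioapic_register_intr() wires level-triggered pins
 * to handle_fasteoi_irq, which uses ->eoi (ack_ioapic_quirk_irq, with
 * the 82093AA erratum workaround above), while edge-triggered pins get
 * handle_edge_irq and the plain ->ack path.
 */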
static inline void init_IO_APIC_traps(void)
{
	int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for (irq = 0; irq < NR_IRQS ; irq++) {
		if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_desc[irq].chip = &no_irq_chip;
		}
	}
}
/*
 * The local APIC irq-chip implementation:
 */

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};

static void lapic_register_intr(int irq, int vector)
{
	irq_desc[irq].status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
	set_intr_gate(vector, interrupt[irq]);
}
static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin  = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest.physical.physical_dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 */
static inline void __init check_timer(void)
{
	int apic1, pin1, apic2, pin2;
	int no_pin1 = 0;
	int vector;
	unsigned int ver;
	unsigned long flags;

	local_irq_save(flags);

	ver = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(ver);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	vector = assign_irq_vector(0);
	set_intr_gate(vector, interrupt[0]);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);
	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq(0, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, vector);
		}
		unmask_IO_APIC_irq(0);
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, vector);
		unmask_IO_APIC_irq(0);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
	timer_ack = 0;

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0, vector);
	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
		"report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1 << PIC_CASCADE_IR)
void __init setup_IO_APIC(void)
{
	int i;

	/* Reserve all the system vectors. */
	for (i = first_system_vector; i < NR_VECTORS; i++)
		set_bit(i, used_vectors);

	/* Mark FIRST_DEVICE_VECTOR which is assigned to IRQ0 as used. */
	set_bit(FIRST_DEVICE_VECTOR, used_vectors);

	enable_IO_APIC();

	io_apic_irqs = ~PIC_IRQS;

	printk("ENABLING IO-APIC IRQs\n");

	/*
	 * Set up IO-APIC IRQ routing.
	 */
	if (!acpi_ioapic)
		setup_ioapic_ids_from_mpc();
	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}
/*
 * Called after all the initialization is done.  If we didn't find any
 * APIC bugs then we can allow the modify fast path.
 */
static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
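
/*
 * Note the ordering: late_initcall() runs after every __init path that
 * could have set sis_apic_bug to 1, so flipping the remaining -1
 * ("don't know") to 0 here is safe and enables the fast path for good.
 */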
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		entry[i] = ioapic_read_entry(dev->id, i);

	return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}
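
/*
 * On resume the I/O APIC may have been reset by the firmware, so both
 * its physical ID (register 0) and every redirection entry saved by
 * ioapic_suspend() have to be written back before interrupts flow again.
 */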
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error = 0;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic IRQ allocation and deallocation
 */
int create_irq(void)
{
	/* Allocate an unused irq */
	int irq, new, vector = 0;
	unsigned long flags;

	irq = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = (NR_IRQS - 1); new >= 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		if (irq_vector[new] != 0)
			continue;
		vector = __assign_irq_vector(new);
		if (likely(vector > 0))
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq >= 0) {
		set_intr_gate(vector, interrupt[irq]);
		dynamic_irq_init(irq);
	}
	return irq;
}
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	clear_bit(irq_vector[irq], used_vectors);
	irq_vector[irq] = 0;
	spin_unlock_irqrestore(&vector_lock, flags);
}
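
/*
 * A minimal usage sketch of the pair above (illustrative only and kept
 * out of the build; "example_dynamic_irq" is a made-up name, not a
 * kernel interface).  The MSI code below is the real in-tree user.
 */
#if 0
static int example_dynamic_irq(void)
{
	int irq = create_irq();		/* reserve an irq + CPU vector */

	if (irq < 0)
		return irq;		/* typically -ENOSPC */

	/* ... request_irq(), point hardware at it, etc. ... */

	destroy_irq(irq);		/* release the vector again */
	return 0;
}
#endif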
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	int vector;
	unsigned dest;

	vector = assign_irq_vector(irq);
	if (vector >= 0) {
		dest = cpu_mask_to_apicid(TARGET_CPUS);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(vector);
	}
	return vector;
}
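
/*
 * For reference, the message built above follows the x86 MSI layout:
 * address_lo is 0xFEExxxxx with the destination APIC ID in bits 19:12
 * plus the destination-mode and redirection-hint bits, and data carries
 * the vector in bits 7:0 together with the trigger and delivery mode.
 */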
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	int vector;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		tmp = TARGET_CPUS;

	vector = assign_irq_vector(irq);
	if (vector < 0)
		return;

	dest = cpu_mask_to_apicid(mask);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_ioapic_irq,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	int irq, ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
				      "edge");

	return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#endif /* CONFIG_PCI_MSI */
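
/*
 * Driver's-eye view of the hooks above (illustrative only, not built;
 * "example_msi_probe" and "example_handler" are made-up names).
 * pci_enable_msi() ends up calling arch_setup_msi_irq(), after which
 * the device interrupt behaves like any other edge-triggered IRQ:
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_msi_probe(struct pci_dev *pdev)
{
	if (pci_enable_msi(pdev))	/* allocates irq, programs message */
		return -EIO;		/* caller would fall back to INTx */

	return request_irq(pdev->irq, example_handler, 0, "example", pdev);
}
#endif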
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		tmp = TARGET_CPUS;

	cpus_and(mask, tmp, CPU_MASK_ALL);

	dest = cpu_mask_to_apicid(mask);

	target_ht_irq(irq, dest);
	irq_desc[irq].affinity = mask;
}
#endif
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_ioapic_irq,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	int vector;

	vector = assign_irq_vector(irq);
	if (vector >= 0) {
		struct ht_irq_msg msg;
		unsigned dest;
		cpumask_t tmp;

		cpus_clear(tmp);
		cpu_set(vector >> 8, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return vector;
}
#endif /* CONFIG_HT_IRQ */
/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (check_apicid_used(apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!check_apicid_used(apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	tmp = apicid_to_cpu_present(apic_id);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n",
			       ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
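
/*
 * The read-back and "Sanity check" above are deliberate: the ID field
 * is the only part of register 0 we program, and a write that the
 * hardware ignores would otherwise go unnoticed.
 */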
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
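
/*
 * Note that bits.entries is the index of the *last* redirection entry,
 * not a count; callers sizing nr_ioapic_registers[] must add 1.
 */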
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
{
	struct IO_APIC_route_entry entry;

	if (!IO_APIC_IRQ(irq)) {
		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
	 * Note that we mask (disable) IRQs now -- these get enabled when the
	 * corresponding device driver registers for this IRQ.
	 */

	memset(&entry, 0, sizeof(entry));

	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.dest_mode = INT_DEST_MODE;
	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.trigger = edge_level;
	entry.polarity = active_high_low;
	entry.mask = 1;

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	entry.vector = assign_irq_vector(irq);

	apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
		"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
		mp_ioapics[ioapic].mp_apicid, pin, entry.vector, irq,
		edge_level, active_high_low);

	ioapic_register_intr(irq, entry.vector, edge_level);

	if (!ioapic && (irq < 16))
		disable_8259A_irq(irq);

	ioapic_write_entry(ioapic, pin, entry);

	return 0;
}
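
/*
 * Sketch of the usual ACPI caller (illustrative only, not built; the
 * in-tree caller is mp_register_gsi()).  PCI interrupts are level
 * triggered and active low, so both flags are passed as 1:
 */
#if 0
	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
				1 /* level */, 1 /* active low */);
#endif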
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
static int __init parse_disable_timer_pin_1(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", parse_disable_timer_pin_1);

static int __init parse_enable_timer_pin_1(char *arg)
{
	disable_timer_pin_1 = -1;
	return 0;
}
early_param("enable_timer_pin_1", parse_enable_timer_pin_1);

static int __init parse_noapic(char *arg)
{
	/* disable IO-APIC */
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);
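
/*
 * These are kernel command line switches, e.g. booting with "noapic"
 * or "disable_timer_pin_1"; being early_param()s they are parsed
 * before the I/O APICs are set up, which is the whole point.
 */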
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	int i;

	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
		} else {
fake_ioapic_page:
			ioapic_phys = (unsigned long)
				      alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
		       __fix_to_virt(idx), ioapic_phys);