x86: convert cpuinfo_x86 array to a per_cpu array
[wrt350n-kernel.git] / arch/x86/kernel/apic_64.c
blob f47bc493dba94fbbb4dd8b8ee68bbf3f445109b0
1 /*
2 * Local APIC handling, local APIC timers
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
17 #include <linux/init.h>
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/module.h>
27 #include <linux/ioport.h>
28 #include <linux/clockchips.h>
30 #include <asm/atomic.h>
31 #include <asm/smp.h>
32 #include <asm/mtrr.h>
33 #include <asm/mpspec.h>
34 #include <asm/pgalloc.h>
35 #include <asm/mach_apic.h>
36 #include <asm/nmi.h>
37 #include <asm/idle.h>
38 #include <asm/proto.h>
39 #include <asm/timex.h>
40 #include <asm/hpet.h>
41 #include <asm/apic.h>
43 int apic_verbosity;
44 int disable_apic_timer __cpuinitdata;
45 static int apic_calibrate_pmtmr __initdata;
47 /* Local APIC timer works in C2? */
48 int local_apic_timer_c2_ok;
49 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
51 static struct resource *ioapic_resources;
52 static struct resource lapic_resource = {
53 .name = "Local APIC",
54 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
57 static unsigned int calibration_result;
59 static int lapic_next_event(unsigned long delta,
60 struct clock_event_device *evt);
61 static void lapic_timer_setup(enum clock_event_mode mode,
62 struct clock_event_device *evt);
64 static void lapic_timer_broadcast(cpumask_t mask);
66 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
68 static struct clock_event_device lapic_clockevent = {
69 .name = "lapic",
70 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
71 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
72 .shift = 32,
73 .set_mode = lapic_timer_setup,
74 .set_next_event = lapic_next_event,
75 .broadcast = lapic_timer_broadcast,
76 .rating = 100,
77 .irq = -1,
79 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
81 static int lapic_next_event(unsigned long delta,
82 struct clock_event_device *evt)
84 apic_write(APIC_TMICT, delta);
85 return 0;
88 static void lapic_timer_setup(enum clock_event_mode mode,
89 struct clock_event_device *evt)
91 unsigned long flags;
92 unsigned int v;
94 /* Lapic used as dummy for broadcast ? */
95 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
96 return;
98 local_irq_save(flags);
100 switch (mode) {
101 case CLOCK_EVT_MODE_PERIODIC:
102 case CLOCK_EVT_MODE_ONESHOT:
103 __setup_APIC_LVTT(calibration_result,
104 mode != CLOCK_EVT_MODE_PERIODIC, 1);
105 break;
106 case CLOCK_EVT_MODE_UNUSED:
107 case CLOCK_EVT_MODE_SHUTDOWN:
108 v = apic_read(APIC_LVTT);
109 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
110 apic_write(APIC_LVTT, v);
111 break;
112 case CLOCK_EVT_MODE_RESUME:
113 /* Nothing to do here */
114 break;
117 local_irq_restore(flags);
121 * Local APIC timer broadcast function
123 static void lapic_timer_broadcast(cpumask_t mask)
125 #ifdef CONFIG_SMP
126 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
127 #endif
130 static void apic_pm_activate(void);
132 void apic_wait_icr_idle(void)
134 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
135 cpu_relax();
138 unsigned int safe_apic_wait_icr_idle(void)
140 unsigned int send_status;
141 int timeout;
143 timeout = 0;
144 do {
145 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
146 if (!send_status)
147 break;
148 udelay(100);
149 } while (timeout++ < 1000);
151 return send_status;
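/*
 * A quick budget sketch for the bounded wait above: at most 1000
 * iterations of udelay(100) means safe_apic_wait_icr_idle() gives up
 * after roughly 1000 * 100us = 100ms and hands the still-set
 * APIC_ICR_BUSY status back to the caller, whereas apic_wait_icr_idle()
 * above it spins indefinitely.
 */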
154 void enable_NMI_through_LVT0(void *dummy)
156 unsigned int v;
158 /* unmask and set to NMI */
159 v = APIC_DM_NMI;
160 apic_write(APIC_LVT0, v);
163 int get_maxlvt(void)
165 unsigned int v, maxlvt;
167 v = apic_read(APIC_LVR);
168 maxlvt = GET_APIC_MAXLVT(v);
169 return maxlvt;
173 * 'What should we do if we get a hw irq event on an illegal vector?'
174 * Each architecture has to answer this for itself.
176 void ack_bad_irq(unsigned int irq)
178 printk("unexpected IRQ trap at vector %02x\n", irq);
180 * Currently unexpected vectors happen only on SMP and APIC.
181 * We _must_ ack these because every local APIC has only N
182 * irq slots per priority level, and a 'hanging, unacked' IRQ
183 * holds up an irq slot - in excessive cases (when multiple
184 * unexpected vectors occur) that might lock up the APIC
185 * completely.
186 * But don't ack when the APIC is disabled. -AK
188 if (!disable_apic)
189 ack_APIC_irq();
192 void clear_local_APIC(void)
194 int maxlvt;
195 unsigned int v;
197 maxlvt = get_maxlvt();
200 * Masking an LVT entry can trigger a local APIC error
201 * if the vector is zero. Mask LVTERR first to prevent this.
203 if (maxlvt >= 3) {
204 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
205 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
208 * Careful: we have to set masks only first to deassert
209 * any level-triggered sources.
211 v = apic_read(APIC_LVTT);
212 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
213 v = apic_read(APIC_LVT0);
214 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
215 v = apic_read(APIC_LVT1);
216 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
217 if (maxlvt >= 4) {
218 v = apic_read(APIC_LVTPC);
219 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
223 * Clean APIC state for other OSs:
225 apic_write(APIC_LVTT, APIC_LVT_MASKED);
226 apic_write(APIC_LVT0, APIC_LVT_MASKED);
227 apic_write(APIC_LVT1, APIC_LVT_MASKED);
228 if (maxlvt >= 3)
229 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
230 if (maxlvt >= 4)
231 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
232 apic_write(APIC_ESR, 0);
233 apic_read(APIC_ESR);
236 void disconnect_bsp_APIC(int virt_wire_setup)
238 /* Go back to Virtual Wire compatibility mode */
239 unsigned long value;
241 /* For the spurious interrupt use vector F, and enable it */
242 value = apic_read(APIC_SPIV);
243 value &= ~APIC_VECTOR_MASK;
244 value |= APIC_SPIV_APIC_ENABLED;
245 value |= 0xf;
246 apic_write(APIC_SPIV, value);
248 if (!virt_wire_setup) {
250 * For LVT0 make it edge triggered, active high,
251 * external and enabled
253 value = apic_read(APIC_LVT0);
254 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
255 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
256 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
257 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
258 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
259 apic_write(APIC_LVT0, value);
260 } else {
261 /* Disable LVT0 */
262 apic_write(APIC_LVT0, APIC_LVT_MASKED);
265 /* For LVT1 make it edge triggered, active high, nmi and enabled */
266 value = apic_read(APIC_LVT1);
267 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
268 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
269 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
270 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
271 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
272 apic_write(APIC_LVT1, value);
275 void disable_local_APIC(void)
277 unsigned int value;
279 clear_local_APIC();
282 * Disable APIC (implies clearing of registers
283 * for 82489DX!).
285 value = apic_read(APIC_SPIV);
286 value &= ~APIC_SPIV_APIC_ENABLED;
287 apic_write(APIC_SPIV, value);
291 * This is to verify that we're looking at a real local APIC.
292 * Check these values against your board if the CPUs refuse to
293 * start for no apparent reason.
295 int __init verify_local_APIC(void)
297 unsigned int reg0, reg1;
300 * The version register is read-only in a real APIC.
302 reg0 = apic_read(APIC_LVR);
303 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
304 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
305 reg1 = apic_read(APIC_LVR);
306 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
309 * The two version reads above should print the same
310 * numbers. If the second one is different, then we
311 * poke at a non-APIC.
313 if (reg1 != reg0)
314 return 0;
317 * Check if the version looks reasonable.
319 reg1 = GET_APIC_VERSION(reg0);
320 if (reg1 == 0x00 || reg1 == 0xff)
321 return 0;
322 reg1 = get_maxlvt();
323 if (reg1 < 0x02 || reg1 == 0xff)
324 return 0;
327 * The ID register is read/write in a real APIC.
329 reg0 = apic_read(APIC_ID);
330 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
331 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
332 reg1 = apic_read(APIC_ID);
333 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
334 apic_write(APIC_ID, reg0);
335 if (reg1 != (reg0 ^ APIC_ID_MASK))
336 return 0;
339 * The next two are just to see if we have sane values.
340 * They're only really relevant if we're in Virtual Wire
341 * compatibility mode, but most boxes are these days.
343 reg0 = apic_read(APIC_LVT0);
344 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
345 reg1 = apic_read(APIC_LVT1);
346 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
348 return 1;
351 void __init sync_Arb_IDs(void)
353 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
354 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
355 if (ver >= 0x14) /* P4 or higher */
356 return;
359 * Wait for idle.
361 apic_wait_icr_idle();
363 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
364 apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
365 | APIC_DM_INIT);
369 * An initial setup of the virtual wire mode.
371 void __init init_bsp_APIC(void)
373 unsigned int value;
376 * Don't do the setup now if we have an SMP BIOS as the
377 * through-I/O-APIC virtual wire mode might be active.
379 if (smp_found_config || !cpu_has_apic)
380 return;
382 value = apic_read(APIC_LVR);
385 * Do not trust the local APIC being empty at bootup.
387 clear_local_APIC();
390 * Enable APIC.
392 value = apic_read(APIC_SPIV);
393 value &= ~APIC_VECTOR_MASK;
394 value |= APIC_SPIV_APIC_ENABLED;
395 value |= APIC_SPIV_FOCUS_DISABLED;
396 value |= SPURIOUS_APIC_VECTOR;
397 apic_write(APIC_SPIV, value);
400 * Set up the virtual wire mode.
402 apic_write(APIC_LVT0, APIC_DM_EXTINT);
403 value = APIC_DM_NMI;
404 apic_write(APIC_LVT1, value);
407 void __cpuinit setup_local_APIC(void)
409 unsigned int value, maxlvt;
410 int i, j;
412 value = apic_read(APIC_LVR);
414 BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
417 * Double-check whether this APIC is really registered.
418 * This is meaningless in clustered apic mode, so we skip it.
420 if (!apic_id_registered())
421 BUG();
424 * Intel recommends setting DFR, LDR and TPR before enabling
425 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
426 * document number 292116). So here it goes...
428 init_apic_ldr();
431 * Set Task Priority to 'accept all'. We never change this
432 * later on.
434 value = apic_read(APIC_TASKPRI);
435 value &= ~APIC_TPRI_MASK;
436 apic_write(APIC_TASKPRI, value);
439 * After a crash, we no longer service the interrupts and a pending
440 * interrupt from previous kernel might still have ISR bit set.
442 * Most probably the CPU has serviced that pending interrupt by now,
443 * but it might not have done the ack_APIC_irq() because it thought the
444 * interrupt came from the i8259 as ExtInt. The LAPIC did not get an EOI,
445 * so it does not clear the ISR bit, and the CPU thinks it has already
446 * serviced the interrupt. Hence a vector might get locked. This was
447 * noticed for the timer irq (vector 0x31). Issue an extra EOI to clear the ISR.
449 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
450 value = apic_read(APIC_ISR + i*0x10);
451 for (j = 31; j >= 0; j--) {
452 if (value & (1<<j))
453 ack_APIC_irq();
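/*
 * For illustration (assuming the usual 256-vector layout): the ISR is
 * spread over APIC_ISR_NR 32-bit registers spaced 0x10 apart, so vector
 * V lives in register V >> 5 at bit V & 0x1f.  The timer vector 0x31
 * mentioned above would therefore be bit 0x11 of APIC_ISR + 1*0x10.
 */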
458 * Now that we are all set up, enable the APIC
460 value = apic_read(APIC_SPIV);
461 value &= ~APIC_VECTOR_MASK;
463 * Enable APIC
465 value |= APIC_SPIV_APIC_ENABLED;
467 /* We always use processor focus */
470 * Set spurious IRQ vector
472 value |= SPURIOUS_APIC_VECTOR;
473 apic_write(APIC_SPIV, value);
476 * Set up LVT0, LVT1:
478 * set up through-local-APIC on the BP's LINT0. This is not
479 * strictly necessary in pure symmetric-IO mode, but sometimes
480 * we delegate interrupts to the 8259A.
483 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
485 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
486 if (!smp_processor_id() && !value) {
487 value = APIC_DM_EXTINT;
488 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
489 smp_processor_id());
490 } else {
491 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
492 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
493 smp_processor_id());
495 apic_write(APIC_LVT0, value);
498 * only the BP should see the LINT1 NMI signal, obviously.
500 if (!smp_processor_id())
501 value = APIC_DM_NMI;
502 else
503 value = APIC_DM_NMI | APIC_LVT_MASKED;
504 apic_write(APIC_LVT1, value);
507 unsigned oldvalue;
508 maxlvt = get_maxlvt();
509 oldvalue = apic_read(APIC_ESR);
510 value = ERROR_APIC_VECTOR; /* enables sending errors */
511 apic_write(APIC_LVTERR, value);
513 * spec says clear errors after enabling vector.
515 if (maxlvt > 3)
516 apic_write(APIC_ESR, 0);
517 value = apic_read(APIC_ESR);
518 if (value != oldvalue)
519 apic_printk(APIC_VERBOSE,
520 "ESR value after enabling vector: %08x, after %08x\n",
521 oldvalue, value);
524 nmi_watchdog_default();
525 setup_apic_nmi_watchdog(NULL);
526 apic_pm_activate();
529 #ifdef CONFIG_PM
531 static struct {
532 /* 'active' is true if the local APIC was enabled by us and
533 not the BIOS; this signifies that we are also responsible
534 for disabling it before entering apm/acpi suspend */
535 int active;
536 /* r/w apic fields */
537 unsigned int apic_id;
538 unsigned int apic_taskpri;
539 unsigned int apic_ldr;
540 unsigned int apic_dfr;
541 unsigned int apic_spiv;
542 unsigned int apic_lvtt;
543 unsigned int apic_lvtpc;
544 unsigned int apic_lvt0;
545 unsigned int apic_lvt1;
546 unsigned int apic_lvterr;
547 unsigned int apic_tmict;
548 unsigned int apic_tdcr;
549 unsigned int apic_thmr;
550 } apic_pm_state;
552 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
554 unsigned long flags;
555 int maxlvt;
557 if (!apic_pm_state.active)
558 return 0;
560 maxlvt = get_maxlvt();
562 apic_pm_state.apic_id = apic_read(APIC_ID);
563 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
564 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
565 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
566 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
567 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
568 if (maxlvt >= 4)
569 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
570 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
571 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
572 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
573 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
574 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
575 #ifdef CONFIG_X86_MCE_INTEL
576 if (maxlvt >= 5)
577 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
578 #endif
579 local_irq_save(flags);
580 disable_local_APIC();
581 local_irq_restore(flags);
582 return 0;
585 static int lapic_resume(struct sys_device *dev)
587 unsigned int l, h;
588 unsigned long flags;
589 int maxlvt;
591 if (!apic_pm_state.active)
592 return 0;
594 maxlvt = get_maxlvt();
596 local_irq_save(flags);
597 rdmsr(MSR_IA32_APICBASE, l, h);
598 l &= ~MSR_IA32_APICBASE_BASE;
599 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
600 wrmsr(MSR_IA32_APICBASE, l, h);
601 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
602 apic_write(APIC_ID, apic_pm_state.apic_id);
603 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
604 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
605 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
606 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
607 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
608 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
609 #ifdef CONFIG_X86_MCE_INTEL
610 if (maxlvt >= 5)
611 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
612 #endif
613 if (maxlvt >= 4)
614 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
615 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
616 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
617 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
618 apic_write(APIC_ESR, 0);
619 apic_read(APIC_ESR);
620 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
621 apic_write(APIC_ESR, 0);
622 apic_read(APIC_ESR);
623 local_irq_restore(flags);
624 return 0;
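/*
 * A sketch of what the rdmsr/wrmsr pair above produces, assuming the
 * default local APIC base of 0xfee00000: MSR_IA32_APICBASE keeps its
 * flag bits (bit 8 = BSP, bit 11 = global enable), the old base bits
 * are cleared, and the register is rewritten as roughly
 * 0xfee00000 | MSR_IA32_APICBASE_ENABLE, i.e. 0xfee00800, before the
 * saved APIC registers are restored.
 */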
627 static struct sysdev_class lapic_sysclass = {
628 set_kset_name("lapic"),
629 .resume = lapic_resume,
630 .suspend = lapic_suspend,
633 static struct sys_device device_lapic = {
634 .id = 0,
635 .cls = &lapic_sysclass,
638 static void __cpuinit apic_pm_activate(void)
640 apic_pm_state.active = 1;
643 static int __init init_lapic_sysfs(void)
645 int error;
646 if (!cpu_has_apic)
647 return 0;
648 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
649 error = sysdev_class_register(&lapic_sysclass);
650 if (!error)
651 error = sysdev_register(&device_lapic);
652 return error;
654 device_initcall(init_lapic_sysfs);
656 #else /* CONFIG_PM */
658 static void apic_pm_activate(void) { }
660 #endif /* CONFIG_PM */
662 static int __init apic_set_verbosity(char *str)
664 if (str == NULL) {
665 skip_ioapic_setup = 0;
666 ioapic_force = 1;
667 return 0;
669 if (strcmp("debug", str) == 0)
670 apic_verbosity = APIC_DEBUG;
671 else if (strcmp("verbose", str) == 0)
672 apic_verbosity = APIC_VERBOSE;
673 else {
674 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
675 " use apic=verbose or apic=debug\n", str);
676 return -EINVAL;
679 return 0;
681 early_param("apic", apic_set_verbosity);
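/*
 * Usage sketch: booting with a bare "apic" forces the IO-APIC setup on
 * (skip_ioapic_setup = 0, ioapic_force = 1), while "apic=verbose" or
 * "apic=debug" only raises the log level used by apic_printk()
 * throughout this file.
 */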
684 * Detect and enable local APICs on non-SMP boards.
685 * Original code written by Keir Fraser.
686 * On AMD64 we trust the BIOS - if it says no APIC it is likely
687 * not correctly set up (usually the APIC timer won't work etc.)
690 static int __init detect_init_APIC(void)
692 if (!cpu_has_apic) {
693 printk(KERN_INFO "No local APIC present\n");
694 return -1;
697 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
698 boot_cpu_id = 0;
699 return 0;
702 #ifdef CONFIG_X86_IO_APIC
703 static struct resource * __init ioapic_setup_resources(void)
705 #define IOAPIC_RESOURCE_NAME_SIZE 11
706 unsigned long n;
707 struct resource *res;
708 char *mem;
709 int i;
711 if (nr_ioapics <= 0)
712 return NULL;
714 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
715 n *= nr_ioapics;
717 mem = alloc_bootmem(n);
718 res = (void *)mem;
720 if (mem != NULL) {
721 memset(mem, 0, n);
722 mem += sizeof(struct resource) * nr_ioapics;
724 for (i = 0; i < nr_ioapics; i++) {
725 res[i].name = mem;
726 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
727 sprintf(mem, "IOAPIC %u", i);
728 mem += IOAPIC_RESOURCE_NAME_SIZE;
732 ioapic_resources = res;
734 return res;
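/*
 * Layout sketch of the single bootmem allocation made above, assuming
 * nr_ioapics == 2: the block starts with two struct resource entries,
 * followed by two IOAPIC_RESOURCE_NAME_SIZE (11-byte) name buffers
 * holding "IOAPIC 0" and "IOAPIC 1"; each res[i].name points into its
 * matching name buffer.
 */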
737 static int __init ioapic_insert_resources(void)
739 int i;
740 struct resource *r = ioapic_resources;
742 if (!r) {
743 printk("IO APIC resources could not be allocated.\n");
744 return -1;
747 for (i = 0; i < nr_ioapics; i++) {
748 insert_resource(&iomem_resource, r);
749 r++;
752 return 0;
755 /* Insert the IO APIC resources after PCI initialization has occurred to handle
756 * IO APICs that are mapped in on a BAR in PCI space. */
757 late_initcall(ioapic_insert_resources);
758 #endif
760 void __init init_apic_mappings(void)
762 unsigned long apic_phys;
765 * If no local APIC can be found then set up a fake all
766 * zeroes page to simulate the local APIC and another
767 * one for the IO-APIC.
769 if (!smp_found_config && detect_init_APIC()) {
770 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
771 apic_phys = __pa(apic_phys);
772 } else
773 apic_phys = mp_lapic_addr;
775 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
776 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
777 APIC_BASE, apic_phys);
779 /* Put local APIC into the resource map. */
780 lapic_resource.start = apic_phys;
781 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
782 insert_resource(&iomem_resource, &lapic_resource);
785 * Fetch the APIC ID of the BSP in case we have a
786 * default configuration (or the MP table is broken).
788 boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
791 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
792 int i;
793 struct resource *ioapic_res;
795 ioapic_res = ioapic_setup_resources();
796 for (i = 0; i < nr_ioapics; i++) {
797 if (smp_found_config) {
798 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
799 } else {
800 ioapic_phys = (unsigned long)
801 alloc_bootmem_pages(PAGE_SIZE);
802 ioapic_phys = __pa(ioapic_phys);
804 set_fixmap_nocache(idx, ioapic_phys);
805 apic_printk(APIC_VERBOSE,
806 "mapped IOAPIC to %016lx (%016lx)\n",
807 __fix_to_virt(idx), ioapic_phys);
808 idx++;
810 if (ioapic_res != NULL) {
811 ioapic_res->start = ioapic_phys;
812 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
813 ioapic_res++;
820 * This function sets up the local APIC timer, with a timeout of
821 * 'clocks' APIC bus clocks. During calibration we actually call
822 * this function twice on the boot CPU, once with a bogus timeout
823 * value and a second time for real. The other (non-calibrating) CPUs
824 * call this function only once, with the real, calibrated value.
826 * We do reads before writes even if unnecessary, to get around the
827 * P5 APIC double write bug.
830 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
832 unsigned int lvtt_value, tmp_value;
834 lvtt_value = LOCAL_TIMER_VECTOR;
835 if (!oneshot)
836 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
837 if (!irqen)
838 lvtt_value |= APIC_LVT_MASKED;
840 apic_write(APIC_LVTT, lvtt_value);
843 * Divide PICLK by 16
845 tmp_value = apic_read(APIC_TDCR);
846 apic_write(APIC_TDCR, (tmp_value
847 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
848 | APIC_TDR_DIV_16);
850 if (!oneshot)
851 apic_write(APIC_TMICT, clocks);
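/*
 * Worked example (numbers purely illustrative): for a periodic,
 * interrupt-enabled timer the LVTT word above becomes
 * LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC, the divide configuration
 * register is set to divide the bus clock by 16, and TMICT is loaded
 * with 'clocks'; with a 200 MHz bus the count then decrements at
 * 200 MHz / 16 = 12.5 MHz.
 */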
854 static void setup_APIC_timer(void)
856 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
858 memcpy(levt, &lapic_clockevent, sizeof(*levt));
859 levt->cpumask = cpumask_of_cpu(smp_processor_id());
861 clockevents_register_device(levt);
865 * In this function we calibrate APIC bus clocks to the external
866 * timer. Unfortunately we cannot use jiffies and the timer irq
867 * to calibrate, since some later bootup code depends on getting
868 * the first irq? Ugh.
870 * We want to do the calibration only once since we
871 * want to have the local timer irqs in sync. CPUs connected
872 * by the same APIC bus have the very same bus frequency.
873 * And we want to have irqs off anyways, no accidental
874 * APIC irq that way.
877 #define TICK_COUNT 100000000
879 static void __init calibrate_APIC_clock(void)
881 unsigned apic, apic_start;
882 unsigned long tsc, tsc_start;
883 int result;
885 local_irq_disable();
888 * Put whatever arbitrary (but long enough) timeout
889 * value into the APIC clock; we just want to get the
890 * counter running for calibration.
892 * No interrupt enable!
894 __setup_APIC_LVTT(250000000, 0, 0);
896 apic_start = apic_read(APIC_TMCCT);
897 #ifdef CONFIG_X86_PM_TIMER
898 if (apic_calibrate_pmtmr && pmtmr_ioport) {
899 pmtimer_wait(5000); /* 5ms wait */
900 apic = apic_read(APIC_TMCCT);
901 result = (apic_start - apic) * 1000L / 5;
902 } else
903 #endif
905 rdtscll(tsc_start);
907 do {
908 apic = apic_read(APIC_TMCCT);
909 rdtscll(tsc);
910 } while ((tsc - tsc_start) < TICK_COUNT &&
911 (apic_start - apic) < TICK_COUNT);
913 result = (apic_start - apic) * 1000L * tsc_khz /
914 (tsc - tsc_start);
917 local_irq_enable();
919 printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
921 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
922 result / 1000 / 1000, result / 1000 % 1000);
924 /* Calculate the scaled math multiplication factor */
925 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
926 lapic_clockevent.max_delta_ns =
927 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
928 lapic_clockevent.min_delta_ns =
929 clockevent_delta2ns(0xF, &lapic_clockevent);
931 calibration_result = result / HZ;
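/*
 * Illustrative user-space sketch (not part of this file) of the fixed-
 * point conversion done above.  The 12,500,000 decrements/sec frequency
 * and HZ = 250 are assumptions picked for the example; div_sc() in the
 * kernel computes the same (result << shift) / NSEC_PER_SEC.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t result = 12500000;        /* assumed timer decrements per second */
	unsigned int shift = 32;
	unsigned int hz = 250;             /* assumed HZ */

	/* div_sc(result, NSEC_PER_SEC, 32): ns * mult >> shift ~= timer ticks */
	uint64_t mult = (result << shift) / 1000000000ULL;
	/* clockevent_delta2ns(): timer ticks back to nanoseconds, roughly */
	uint64_t max_ns = ((uint64_t)0x7FFFFF << shift) / mult;
	uint64_t min_ns = ((uint64_t)0xF << shift) / mult;

	printf("mult=%llu max_delta_ns=%llu min_delta_ns=%llu ticks_per_jiffy=%llu\n",
	       (unsigned long long)mult, (unsigned long long)max_ns,
	       (unsigned long long)min_ns, (unsigned long long)(result / hz));
	return 0;
}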
934 void __init setup_boot_APIC_clock(void)
937 * The local apic timer can be disabled via the kernel commandline.
938 * Register the lapic timer as a dummy clock event source on SMP
939 * systems, so the broadcast mechanism is used. On UP systems simply
940 * ignore it.
942 if (disable_apic_timer) {
943 printk(KERN_INFO "Disabling APIC timer\n");
944 /* No broadcast on UP ! */
945 if (num_possible_cpus() > 1)
946 setup_APIC_timer();
947 return;
950 printk(KERN_INFO "Using local APIC timer interrupts.\n");
951 calibrate_APIC_clock();
954 * If nmi_watchdog is set to IO_APIC, we need the
955 * PIT/HPET going. Otherwise register lapic as a dummy
956 * device.
958 if (nmi_watchdog != NMI_IO_APIC)
959 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
960 else
961 printk(KERN_WARNING "APIC timer registered as dummy,"
962 " due to nmi_watchdog=1!\n");
964 setup_APIC_timer();
968 * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
969 * C1E flag only in the secondary CPU, so when we detect the wreckage
970 * we have already enabled the boot CPU local apic timer. Check if
971 * disable_apic_timer is set and the DUMMY flag is cleared. If so,
972 * set the DUMMY flag again and force the broadcast mode in the
973 * clockevents layer.
975 void __cpuinit check_boot_apic_timer_broadcast(void)
977 if (!disable_apic_timer ||
978 (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
979 return;
981 printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
982 lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
984 local_irq_enable();
985 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
986 local_irq_disable();
989 void __cpuinit setup_secondary_APIC_clock(void)
991 check_boot_apic_timer_broadcast();
992 setup_APIC_timer();
995 int setup_profiling_timer(unsigned int multiplier)
997 return -EINVAL;
1000 void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
1001 unsigned char msg_type, unsigned char mask)
1003 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
1004 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
1005 apic_write(reg, v);
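/*
 * Bit-layout sketch (values illustrative): the AMD extended LVT entries
 * start at K8_APIC_EXT_LVT_BASE and are 0x10 apart, so lvt_off 0 with
 * vector 0xf9, msg_type 0x4 (NMI delivery) and mask 0 would write
 * 0x000004f9 to the first entry: mask in bit 16, message type in
 * bits 8..10, vector in bits 0..7.
 */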
1009 * Local timer interrupt handler. It does both profiling and
1010 * process statistics/rescheduling.
1012 * We do profiling in every local tick, statistics/rescheduling
1013 * happen only every 'profiling multiplier' ticks. The default
1014 * multiplier is 1 and it can be changed by writing the new multiplier
1015 * value into /proc/profile.
1018 void smp_local_timer_interrupt(void)
1020 int cpu = smp_processor_id();
1021 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
1024 * Normally we should not be here until the LAPIC has been initialized, but
1025 * in some cases, like kdump, it's possible that a pending LAPIC timer
1026 * interrupt left over from the previous kernel's context is delivered in
1027 * the new kernel the moment interrupts are enabled.
1029 * Interrupts are enabled early and the LAPIC is set up much later, hence
1030 * it's possible that evt->event_handler is NULL when we get here.
1031 * Check for event_handler being NULL and discard the interrupt as
1032 * spurious.
1034 if (!evt->event_handler) {
1035 printk(KERN_WARNING
1036 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
1037 /* Switch it off */
1038 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
1039 return;
1043 * the NMI deadlock-detector uses this.
1045 add_pda(apic_timer_irqs, 1);
1047 evt->event_handler(evt);
1051 * Local APIC timer interrupt. This is the most natural way for doing
1052 * local interrupts, but local timer interrupts can be emulated by
1053 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
1055 * [ if a single-CPU system runs an SMP kernel then we call the local
1056 * interrupt as well. Thus we cannot inline the local irq ... ]
1058 void smp_apic_timer_interrupt(struct pt_regs *regs)
1060 struct pt_regs *old_regs = set_irq_regs(regs);
1063 * NOTE! We'd better ACK the irq immediately,
1064 * because timer handling can be slow.
1066 ack_APIC_irq();
1068 * update_process_times() expects us to have done irq_enter().
1069 * Besides, if we don't, timer interrupts ignore the global
1070 * interrupt lock, which is the WrongThing (tm) to do.
1072 exit_idle();
1073 irq_enter();
1074 smp_local_timer_interrupt();
1075 irq_exit();
1076 set_irq_regs(old_regs);
1080 * apic_is_clustered_box() -- Check if we can expect good TSC
1082 * Thus far, the major user of this is IBM's Summit2 series:
1084 * Clustered boxes may have unsynced TSC problems if they are
1085 * multi-chassis. Use available data to take a good guess.
1086 * If in doubt, go HPET.
1088 __cpuinit int apic_is_clustered_box(void)
1090 int i, clusters, zeros;
1091 unsigned id;
1092 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
1094 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1096 for (i = 0; i < NR_CPUS; i++) {
1097 id = bios_cpu_apicid[i];
1098 if (id != BAD_APICID)
1099 __set_bit(APIC_CLUSTERID(id), clustermap);
1102 /* Problem: Partially populated chassis may not have CPUs in some of
1103 * the APIC clusters they have been allocated. Only present CPUs have
1104 * bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
1105 * clusters are allocated sequentially, count zeros only if they are
1106 * bounded by ones.
1108 clusters = 0;
1109 zeros = 0;
1110 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
1111 if (test_bit(i, clustermap)) {
1112 clusters += 1 + zeros;
1113 zeros = 0;
1114 } else
1115 ++zeros;
1119 * If clusters > 2, then should be multi-chassis.
1120 * May have to revisit this when multi-core + hyperthreaded CPUs come
1121 * out, but AFAIK this will work even for them.
1123 return (clusters > 2);
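/*
 * Illustrative user-space sketch (not part of this file) of the cluster
 * counting above: runs of unset bits are only counted once a later set
 * bit proves they are "bounded by ones".  The sample bitmap is made up.
 */
#include <stdio.h>

int main(void)
{
	/* set bits for clusters 0, 1 and 4; clusters 2-3 are empty "holes" */
	int clustermap[8] = { 1, 1, 0, 0, 1, 0, 0, 0 };
	int i, clusters = 0, zeros = 0;

	for (i = 0; i < 8; i++) {
		if (clustermap[i]) {
			clusters += 1 + zeros;	/* count the hole we just crossed */
			zeros = 0;
		} else {
			++zeros;
		}
	}
	/* prints 5: clusters 0, 1, 4 plus the two empty ones bounded by them */
	printf("clusters = %d -> %s\n", clusters,
	       clusters > 2 ? "treat as clustered (prefer HPET)" : "not clustered");
	return 0;
}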
1127 * This interrupt should _never_ happen with our APIC/SMP architecture
1129 asmlinkage void smp_spurious_interrupt(void)
1131 unsigned int v;
1132 exit_idle();
1133 irq_enter();
1135 * Check if this really is a spurious interrupt and ACK it
1136 * if it is a vectored one. Just in case...
1137 * Spurious interrupts should not be ACKed.
1139 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1140 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1141 ack_APIC_irq();
1143 add_pda(irq_spurious_count, 1);
1144 irq_exit();
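/*
 * Worked example of the ISR lookup above, assuming the usual
 * SPURIOUS_APIC_VECTOR of 0xff: (0xff & ~0x1f) >> 1 = 0x70, so the read
 * hits APIC_ISR + 0x70 (the eighth 32-bit ISR register), and the bit
 * tested is 0xff & 0x1f = bit 31.
 */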
1148 * This interrupt should never happen with our APIC/SMP architecture
1151 asmlinkage void smp_error_interrupt(void)
1153 unsigned int v, v1;
1155 exit_idle();
1156 irq_enter();
1157 /* First tickle the hardware, only then report what went on. -- REW */
1158 v = apic_read(APIC_ESR);
1159 apic_write(APIC_ESR, 0);
1160 v1 = apic_read(APIC_ESR);
1161 ack_APIC_irq();
1162 atomic_inc(&irq_err_count);
1164 /* Here is what the APIC error bits mean:
1165 0: Send CS error
1166 1: Receive CS error
1167 2: Send accept error
1168 3: Receive accept error
1169 4: Reserved
1170 5: Send illegal vector
1171 6: Received illegal vector
1172 7: Illegal register address
1174 printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1175 smp_processor_id(), v, v1);
1176 irq_exit();
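/*
 * Illustrative user-space sketch (not part of this file): decoding an
 * ESR value such as the "%02x(%02x)" pair printed above, using the bit
 * meanings listed in the comment.  The sample value 0x44 is made up.
 */
#include <stdio.h>

int main(void)
{
	static const char * const esr_bits[8] = {
		"Send CS error", "Receive CS error",
		"Send accept error", "Receive accept error",
		"Reserved", "Send illegal vector",
		"Received illegal vector", "Illegal register address",
	};
	unsigned int esr = 0x44;	/* assumed sample value */
	int i;

	for (i = 0; i < 8; i++)
		if (esr & (1u << i))
			printf("ESR bit %d: %s\n", i, esr_bits[i]);
	return 0;
}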
1179 int disable_apic;
1182 * This initializes the IO-APIC and APIC hardware if this is
1183 * a UP kernel.
1185 int __init APIC_init_uniprocessor(void)
1187 if (disable_apic) {
1188 printk(KERN_INFO "Apic disabled\n");
1189 return -1;
1191 if (!cpu_has_apic) {
1192 disable_apic = 1;
1193 printk(KERN_INFO "Apic disabled by BIOS\n");
1194 return -1;
1197 verify_local_APIC();
1199 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
1200 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
1202 setup_local_APIC();
1204 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1205 setup_IO_APIC();
1206 else
1207 nr_ioapics = 0;
1208 setup_boot_APIC_clock();
1209 check_nmi_watchdog();
1210 return 0;
1213 static __init int setup_disableapic(char *str)
1215 disable_apic = 1;
1216 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1217 return 0;
1219 early_param("disableapic", setup_disableapic);
1221 /* same as disableapic, for compatibility */
1222 static __init int setup_nolapic(char *str)
1224 return setup_disableapic(str);
1226 early_param("nolapic", setup_nolapic);
1228 static int __init parse_lapic_timer_c2_ok(char *arg)
1230 local_apic_timer_c2_ok = 1;
1231 return 0;
1233 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1235 static __init int setup_noapictimer(char *str)
1237 if (str[0] != ' ' && str[0] != 0)
1238 return 0;
1239 disable_apic_timer = 1;
1240 return 1;
1242 __setup("noapictimer", setup_noapictimer);
1244 static __init int setup_apicpmtimer(char *s)
1246 apic_calibrate_pmtmr = 1;
1247 notsc_setup(NULL);
1248 return 0;
1250 __setup("apicpmtimer", setup_apicpmtimer);