/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * linux/arch/i386/kernel/voyager_smp.c
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
	{ [0 ... NR_CPUS-1] = ~0UL };
/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;
/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;
/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;
/* Have we found an SMP box - used by time.c to do the profiling
   interrupt for timeslicing; do not set to 1 until the per CPU timer
   interrupt is active */
int smp_found_config = 0;
/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;
/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);
/* Inline functions */
static inline void
send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
		(smp_processor_id() << 16) + cpi;
}
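/* e.g. CPU 3 sending CPI 2 writes (3 << 16) + 2 = 0x00030002 into the
 * target's mailbox: the low half carries the CPI number and the high
 * half identifies the sending processor */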
static inline void
send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if(cpuset & (1<<cpu)) {
#ifdef VOYAGER_DEBUG
			if(!cpu_isset(cpu, cpu_online_map))
				VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
#endif
			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
		}
	}
}
static inline void
wrapper_smp_local_timer_interrupt(void)
{
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
}
static inline void
send_one_CPI(__u8 cpu, __u8 cpi)
{
	if(voyager_quad_processors & (1<<cpu))
		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
	else
		send_CPI(1<<cpu, cpi);
}
static inline void
send_CPI_allbutself(__u8 cpi)
{
	__u8 cpu = smp_processor_id();
	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
	send_CPI(mask, cpi);
}
static inline int
is_cpu_quad(void)
{
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}
static inline int
is_cpu_extended(void)
{
	__u8 cpu = hard_smp_processor_id();

	return(voyager_extended_vic_processors & (1<<cpu));
}
static inline int
is_cpu_vic_boot(void)
{
	__u8 cpu = hard_smp_processor_id();

	return(voyager_extended_vic_processors
	       & voyager_allowed_boot_processors & (1<<cpu));
}
static inline void
ack_CPI(__u8 cpi)
{
	switch(cpi) {
	case VIC_CPU_BOOT_CPI:
		if(is_cpu_quad() && !is_cpu_vic_boot())
			ack_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	case VIC_SYS_INT:
	case VIC_CMN_INT:
		/* These are slightly strange.  Even on the Quad card,
		 * they are vectored as VIC CPIs */
		if(is_cpu_quad())
			ack_special_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	default:
		printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
		break;
	}
}
/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
	.name		= "VIC",
	.startup	= startup_vic_irq,
	.mask		= mask_vic_irq,
	.unmask		= unmask_vic_irq,
	.set_affinity	= set_vic_irq_affinity,
};
/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* steal a page from the bottom of memory for the trampoline and
 * squirrel its address away here.  This will be in kernel virtual
 * space */
static __u32 trampoline_base;
/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;
/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;

/* debugging routine to read the isr of the cpu's pic */
static __u16
vic_read_isr(void)
{
	__u16 isr;

	outb(0x0b, 0xa0);
	isr = inb(0xa0) << 8;
	outb(0x0b, 0x20);
	isr |= inb(0x20);

	return isr;
}
static void
qic_setup(void)
{
	if(!is_cpu_quad()) {
		/* not a quad, no setup */
		return;
	}
	outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
	outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

	if(is_cpu_extended()) {
		/* the QIC duplicate of the VIC base register */
		outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
		outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

		/* FIXME: should set up the QIC timer and memory parity
		 * error vectors here */
	}
}
static void
vic_setup_pic(void)
{
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* clear the claim registers for dynamic routing */
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	outb(0, VIC_PRIORITY_REGISTER);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
	/* Now initialise the master PIC belonging to this CPU by
	 * sending the four ICWs */

	/* ICW1: level triggered, ICW4 needed */
	outb(0x19, 0x20);

	/* ICW2: vector base */
	outb(FIRST_EXTERNAL_VECTOR, 0x21);

	/* ICW3: slave at line 2 */
	outb(0x04, 0x21);

	/* ICW4: 8086 mode */
	outb(0x01, 0x21);

	/* now the same for the slave PIC */

	/* ICW1: level trigger, ICW4 needed */
	outb(0x19, 0xA0);

	/* ICW2: slave vector base */
	outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

	/* ICW3: slave ID */
	outb(0x02, 0xA1);

	/* ICW4: 8086 mode */
	outb(0x01, 0xA1);
}
static void __cpuinit
do_quad_bootstrap(void)
{
	if(is_cpu_quad() && is_cpu_vic_boot()) {
		int i;
		unsigned long flags;
		__u8 cpuid = hard_smp_processor_id();

		local_irq_save(flags);

		for(i = 0; i<4; i++) {
			/* FIXME: this would be >>3 &0x7 on the 32 way */
			if(((cpuid >> 2) & 0x03) == i)
				/* don't lower our own mask! */
				continue;

			/* masquerade as local Quad CPU */
			outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
			/* enable the startup CPI */
			outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
			/* restore cpu id */
			outb(0, QIC_PROCESSOR_ID);
		}
		local_irq_restore(flags);
	}
}
/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init
find_smp_config(void)
{
	int i;

	boot_cpu_id = hard_smp_processor_id();

	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
	/* initialize the CPU structures (moved from smp_boot_cpus) */
	for(i=0; i<NR_CPUS; i++) {
		cpu_irq_affinity[i] = ~0;
	}
	cpu_online_map = cpumask_of_cpu(boot_cpu_id);

	/* The boot CPU must be extended */
	voyager_extended_vic_processors = 1<<boot_cpu_id;
	/* initially, all of the first 8 CPUs can boot */
	voyager_allowed_boot_processors = 0xff;
	/* set up everything for just this CPU, we can alter
	 * this as we start the other CPUs later */
	/* now get the CPU disposition from the extended CMOS */
	cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
	cpu_possible_map = phys_cpu_present_map;
	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
	/* Here we set up the VIC to enable SMP */
	/* enable the CPIs by writing the base vector to their register */
	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* set the claim registers for static routing --- Boot CPU gets
	 * all interrupts until all other CPUs started */
	outb(0xff, VIC_CLAIM_REGISTER_0);
	outb(0xff, VIC_CLAIM_REGISTER_1);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

	/* Finally tell the firmware that we're driving */
	outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
	     VOYAGER_SUS_IN_CONTROL_PORT);
	current_thread_info()->cpu = boot_cpu_id;
	x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
static void __cpuinit
smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;

	identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
static __u32 __cpuinit
setup_trampoline(void)
{
	/* these two are global symbols in trampoline.S */
	extern const __u8 trampoline_end[];
	extern const __u8 trampoline_data[];

	memcpy((__u8 *)trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys((__u8 *)trampoline_base);
}
/* Routine initially called when a non-boot CPU is brought online */
static void __cpuinit
start_secondary(void *unused)
{
	__u8 cpuid = hard_smp_processor_id();
	/* external functions not defined in the headers */
	extern void calibrate_delay(void);

	cpu_init();

	/* OK, we're in the routine */
	ack_CPI(VIC_CPU_BOOT_CPI);

	/* setup the 8259 master slave pair belonging to this CPU ---
	 * we won't actually receive any until the boot CPU
	 * relinquishes its static routing mask */
	vic_setup_pic();

	qic_setup();

	if(is_cpu_quad() && !is_cpu_vic_boot()) {
		/* clear the boot CPI */
		__u8 dummy;

		dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
		printk("read dummy %d\n", dummy);
	}

	/* lower the mask to receive CPIs */
	vic_enable_cpi();

	VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

	/* enable interrupts */
	local_irq_enable();

	/* get our bogomips */
	calibrate_delay();

	/* save our processor parameters */
	smp_store_cpu_info(cpuid);

	/* if we're a quad, we may need to bootstrap other CPUs */
	do_quad_bootstrap();

	/* FIXME: this is rather a poor hack to prevent the CPU
	 * activating softirqs while it's supposed to be waiting for
	 * permission to proceed.  Without this, the new per CPU stuff
	 * in the softirqs will fail */
	local_irq_disable();
	cpu_set(cpuid, cpu_callin_map);

	/* signal that we're done */
	cpu_booted_map = 1;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rep_nop();
	local_irq_enable();

	local_flush_tlb();

	cpu_set(cpuid, cpu_online_map);
	wmb();
	cpu_idle();
}
/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __cpuinit
do_boot_cpu(__u8 cpu)
{
	struct task_struct *idle;
	int timeout;
	unsigned long flags;
	int quad_boot = (1<<cpu) & voyager_quad_processors
		& ~( voyager_extended_vic_processors
		     & voyager_allowed_boot_processors);

	/* This is an area in head.S which was used to set up the
	 * initial kernel stack.  We need to alter this to give the
	 * booting CPU a new stack (taken from its idle process) */
	extern struct {
		__u8 *esp;
		unsigned short ss;
	} stack_start;
	/* This is the format of the CPI IDT gate (in real mode) which
	 * we're hijacking to boot the CPU */
	union IDTFormat {
		struct seg {
			__u16 Offset;
			__u16 Segment;
		} idt;
		__u32 val;
	} hijack_source;

	__u32 *hijack_vector;
	__u32 start_phys_address = setup_trampoline();

	/* There's a clever trick to this: The linux trampoline is
	 * compiled to begin at absolute location zero, so make the
	 * address zero but have the data segment selector compensate
	 * for the actual address */
	hijack_source.idt.Offset = start_phys_address & 0x000F;
	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
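	/* e.g. if setup_trampoline() returned 0x9F000, the gate would be
	 * Segment = 0x9F00, Offset = 0x0; real mode resolves this as
	 * 0x9F00 * 16 + 0x0 = 0x9F000, even though the trampoline itself
	 * is linked at offset zero (address value here is illustrative) */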
	cpucount++;
	alternatives_smp_switch(1);

	idle = fork_idle(cpu);
	if(IS_ERR(idle))
		panic("failed fork for CPU%d", cpu);
	idle->thread.eip = (unsigned long) start_secondary;
	/* init_tasks (in sched.c) is indexed logically */
	stack_start.esp = (void *) idle->thread.esp;

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	irq_ctx_init(cpu);
	/* Note: Don't modify initial ss override */
	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
		hijack_source.idt.Offset, stack_start.esp));

	/* init lowmem identity mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
590 printk("CPU %d: non extended Quad boot\n", cpu
);
591 hijack_vector
= (__u32
*)phys_to_virt((VIC_CPU_BOOT_CPI
+ QIC_DEFAULT_CPI_BASE
)*4);
592 *hijack_vector
= hijack_source
.val
;
594 printk("CPU%d: extended VIC boot\n", cpu
);
595 hijack_vector
= (__u32
*)phys_to_virt((VIC_CPU_BOOT_CPI
+ VIC_DEFAULT_CPI_BASE
)*4);
596 *hijack_vector
= hijack_source
.val
;
597 /* VIC errata, may also receive interrupt at this address */
598 hijack_vector
= (__u32
*)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI
+ VIC_DEFAULT_CPI_BASE
)*4);
599 *hijack_vector
= hijack_source
.val
;
	/* All non-boot CPUs start with interrupts fully masked.  Need
	 * to lower the mask of the CPI we're about to send.  We do
	 * this in the VIC by masquerading as the processor we're
	 * about to boot and lowering its interrupt mask */
	local_irq_save(flags);
	if(quad_boot) {
		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
	} else {
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		/* here we're altering registers belonging to `cpu' */

		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
		/* now go back to our original identity */
		outb(boot_cpu_id, VIC_PROCESSOR_ID);

		/* and boot the CPU */
		send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
	}
	cpu_booted_map = 0;
	local_irq_restore(flags);
	/* now wait for it to become ready (or timeout) */
	for(timeout = 0; timeout < 50000; timeout++) {
		if(cpu_booted_map)
			break;
		udelay(100);
	}
	/* reset the page table */
	zap_low_mappings();

	if (cpu_booted_map) {
		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
			cpu, smp_processor_id()));

		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data(cpu));
		wmb();
		cpu_set(cpu, cpu_callout_map);
		cpu_set(cpu, cpu_present_map);
	} else {
		printk("CPU%d FAILED TO BOOT: ", cpu);
		if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5)
			printk("Stuck.\n");
		else
			printk("Not responding.\n");

		cpucount--;
	}
}
static void __init
smp_boot_cpus(void)
{
	int i;

	/* CAT BUS initialisation must be done after the memory */
	/* FIXME: The L4 has a catbus too, it just needs to be
	 * accessed in a totally different way */
	if(voyager_level == 5) {
		voyager_cat_init();

		/* now that the cat has probed the Voyager System Bus, sanity
		 * check the cpu map */
		if( ((voyager_quad_processors | voyager_extended_vic_processors)
		     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
			/* should panic */
			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
		}
	} else if(voyager_level == 4)
		voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];

	/* this sets up the idle task to run on the current cpu */
	voyager_extended_cpus = 1;
	/* Remove the global_irq_holder setting, it triggers a BUG() on
	 * schedule at the moment */
	//global_irq_holder = boot_cpu_id;

	/* FIXME: Need to do something about this but currently only works
	 * on CPUs with a tsc which none of mine have.
	smp_tune_scheduling();
	 */
	smp_store_cpu_info(boot_cpu_id);
	printk("CPU%d: ", boot_cpu_id);
	print_cpu_info(&cpu_data(boot_cpu_id));
	if(is_cpu_quad()) {
		/* booting on a Quad CPU */
		printk("VOYAGER SMP: Boot CPU is Quad\n");
		qic_setup();
		do_quad_bootstrap();
	}

	/* enable our own CPIs */
	vic_enable_cpi();

	cpu_set(boot_cpu_id, cpu_online_map);
	cpu_set(boot_cpu_id, cpu_callout_map);
	/* loop over all the extended VIC CPUs and boot them.  The
	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
			continue;
		do_boot_cpu(i);
		/* This udelay seems to be needed for the Quad boots
		 * don't remove unless you know what you're doing */
		udelay(1000);
	}
	/* we could compute the total bogomips here, but why bother?,
	 * Code added from smpboot.c */
	{
		unsigned long bogosum = 0;
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_isset(i, cpu_online_map))
				bogosum += cpu_data(i).loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			bogosum/(500000/HZ),
			(bogosum/(5000/HZ))%100);
	}
	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
	printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n",
	       voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
	/* that's it, switch to symmetric mode */
	outb(0, VIC_PRIORITY_REGISTER);
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}
/* Reload the secondary CPUs task structure (this function does not
 * return ) */
void __init
initialize_secondary(void)
{
#if 0
	// AC kernels only
	set_current(hard_get_current());
#endif

	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp),"r" (current->thread.eip));
}
/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
fastcall void
smp_vic_sys_interrupt(struct pt_regs *regs)
{
	ack_CPI(VIC_SYS_INT);
	printk("Voyager SYSTEM INTERRUPT\n");
}
/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
fastcall void
smp_vic_cmn_interrupt(struct pt_regs *regs)
{
	static __u8 in_cmn_int = 0;
	static DEFINE_SPINLOCK(cmn_int_lock);

	/* common ints are broadcast, so make sure we only do this once */
	_raw_spin_lock(&cmn_int_lock);
	if(in_cmn_int)
		goto unlock_end;

	in_cmn_int++;
	_raw_spin_unlock(&cmn_int_lock);

	VDEBUG(("Voyager COMMON INTERRUPT\n"));

	if(voyager_level == 5)
		voyager_cat_do_common_interrupt();

	_raw_spin_lock(&cmn_int_lock);
	in_cmn_int = 0;
 unlock_end:
	_raw_spin_unlock(&cmn_int_lock);
	ack_CPI(VIC_CMN_INT);
}
/*
 * Reschedule call back. Nothing to do, all the work is done
 * automatically when we return from the interrupt.  */
static void
smp_reschedule_interrupt(void)
{
	/* do nothing */
}
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void
leave_mm (unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * Invalidate call-back
 */
static void
smp_invalidate_interrupt(void)
{
	__u8 cpu = smp_processor_id();

	if (!test_bit(cpu, &smp_invalidate_needed))
		return;
	/* This will flood messages.  Don't uncomment unless you see
	 * Problems with cross cpu invalidation
	VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
		smp_processor_id()));
	*/

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	clear_bit(cpu, &smp_invalidate_needed);
	smp_mb__after_clear_bit();
}
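/* The handshake with voyager_flush_tlb_others() below: the initiator
 * sets one bit per target CPU in smp_invalidate_needed and sends
 * VIC_INVALIDATE_CPI; each target flushes, then clears its own bit,
 * and the initiator spins until the mask reaches zero (e.g. a flush
 * aimed at CPUs 1 and 2 sets the mask to 0x6 and waits for both bits
 * to clear) */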
/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
			  unsigned long va)
{
	int stuck = 50000;

	if (!cpumask)
		BUG();
	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &smp_invalidate_needed);
	/*
	 * We have to send the CPI only to
	 * CPUs affected.
	 */
	send_CPI(cpumask, VIC_INVALIDATE_CPI);

	while (smp_invalidate_needed) {
		mb();
		if(--stuck == 0) {
			printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id());
			break;
		}
	}

	/* Uncomment only to debug invalidation problems
	VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
	*/

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void
flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
void
flush_tlb_mm (struct mm_struct * mm)
{
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	if (current->active_mm == mm) {
		if(current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
/* enable the requested IRQs */
static void
smp_enable_irq_interrupt(void)
{
	__u8 irq;
	__u8 cpu = get_cpu();

	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
		vic_irq_enable_mask[cpu]));

	spin_lock(&vic_irq_lock);
	for(irq = 0; irq < 16; irq++) {
		if(vic_irq_enable_mask[cpu] & (1<<irq))
			enable_local_vic_irq(irq);
	}
	vic_irq_enable_mask[cpu] = 0;
	spin_unlock(&vic_irq_lock);

	put_cpu_no_resched();
}

static void
smp_stop_cpu_function(void *dummy)
{
	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for(;;)
		halt();
}
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	volatile unsigned long started;
	volatile unsigned long finished;
	int wait;
};

static struct call_data_struct * call_data;
/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void
smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	/* must take copy of wait because call_data may be replaced
	 * unless the function is waiting for us to finish */
	int wait = call_data->wait;
	__u8 cpu = smp_processor_id();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	if(!test_and_clear_bit(cpu, &call_data->started)) {
		/* If the bit wasn't set, this could be a replay */
		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu);
		return;
	}
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();
	if (wait) {
		mb();
		clear_bit(cpu, &call_data->finished);
	}
}
static int
voyager_smp_call_function_mask (cpumask_t cpumask,
				void (*func) (void *info), void *info,
				int wait)
{
	struct call_data_struct data;
	u32 mask = cpus_addr(cpumask)[0];

	mask &= ~(1<<smp_processor_id());

	if (!mask)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.started = mask;
	data.wait = wait;
	if (wait)
		data.finished = mask;

	spin_lock(&call_lock);
	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_CPI(mask, VIC_CALL_FUNCTION_CPI);

	/* Wait for response */
	while (data.started)
		barrier();

	if (wait)
		while (data.finished)
			barrier();

	spin_unlock(&call_lock);

	return 0;
}
/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't use this actually for counting so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
fastcall void
smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}
/* All of the QUAD interrupt GATES */
fastcall void
smp_qic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	ack_QIC_CPI(QIC_TIMER_CPI);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}
fastcall void
smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_INVALIDATE_CPI);
	smp_invalidate_interrupt();
}

fastcall void
smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
	smp_reschedule_interrupt();
}

fastcall void
smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
	smp_enable_irq_interrupt();
}

fastcall void
smp_qic_call_function_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
	smp_call_function_interrupt();
}
fastcall void
smp_vic_cpi_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	__u8 cpu = smp_processor_id();

	if(is_cpu_quad())
		ack_QIC_CPI(VIC_CPI_LEVEL0);
	else
		ack_VIC_CPI(VIC_CPI_LEVEL0);

	if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
		wrapper_smp_local_timer_interrupt();
	if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
		smp_invalidate_interrupt();
	if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
		smp_reschedule_interrupt();
	if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
		smp_enable_irq_interrupt();
	if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
		smp_call_function_interrupt();
	set_irq_regs(old_regs);
}
static void
do_flush_tlb_all(void* info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/* used to set up the trampoline for other CPUs when the memory manager
 * is sufficiently set up to be able to allocate low memory */
void __init
smp_alloc_memory(void)
{
	trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
	if(__pa(trampoline_base) >= 0x93000)
		BUG();
}
/* send a reschedule CPI to one CPU by physical CPU number */
static void
voyager_smp_send_reschedule(int cpu)
{
	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
int
hard_smp_processor_id(void)
{
	__u8 i;
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
		return cpumask & 0x1F;

	for(i = 0; i < 8; i++) {
		if(cpumask & (1<<i))
			return i;
	}
	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
	return 0;
}
int
safe_smp_processor_id(void)
{
	return hard_smp_processor_id();
}
/* broadcast a halt to all other CPUs */
static void
voyager_smp_send_stop(void)
{
	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
static void
smp_vic_timer_interrupt(void)
{
	send_CPI_allbutself(VIC_TIMER_CPI);
	smp_local_timer_interrupt();
}
/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void
smp_local_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	long weight;

	profile_tick(CPU_PROFILING);
	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
		    per_cpu(prof_old_multiplier, cpu)) {
			/* FIXME: need to update the vic timer tick here */
			per_cpu(prof_old_multiplier, cpu) =
				per_cpu(prof_counter, cpu);
		}

		update_process_times(user_mode_vm(get_irq_regs()));
	}
1283 if( ((1<<cpu
) & voyager_extended_vic_processors
) == 0)
1284 /* only extended VIC processors participate in
1285 * interrupt distribution */
1289 * We take the 'long' return path, and there every subsystem
1290 * grabs the appropriate locks (kernel lock/ irq lock).
1292 * we might want to decouple profiling from the 'long path',
1293 * and do the profiling totally in assembly.
1295 * Currently this isn't too much of an issue (performance wise),
1296 * we can take more than 100K local irqs per second on a 100 MHz P5.
1299 if((++vic_tick
[cpu
] & 0x7) != 0)
1301 /* get here every 16 ticks (about every 1/6 of a second) */
1303 /* Change our priority to give someone else a chance at getting
1304 * the IRQ. The algorithm goes like this:
1306 * In the VIC, the dynamically routed interrupt is always
1307 * handled by the lowest priority eligible (i.e. receiving
1308 * interrupts) CPU. If >1 eligible CPUs are equal lowest, the
1309 * lowest processor number gets it.
1311 * The priority of a CPU is controlled by a special per-CPU
1312 * VIC priority register which is 3 bits wide 0 being lowest
1313 * and 7 highest priority..
1315 * Therefore we subtract the average number of interrupts from
1316 * the number we've fielded. If this number is negative, we
1317 * lower the activity count and if it is positive, we raise
1320 * I'm afraid this still leads to odd looking interrupt counts:
1321 * the totals are all roughly equal, but the individual ones
1322 * look rather skewed.
1324 * FIXME: This algorithm is total crap when mixed with SMP
1325 * affinity code since we now try to even up the interrupt
1326 * counts when an affinity binding is keeping them on a
1328 weight
= (vic_intr_count
[cpu
]*voyager_extended_cpus
1329 - vic_intr_total
) >> 4;
1336 outb((__u8
)weight
, VIC_PRIORITY_REGISTER
);
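	/* worked example (illustrative numbers): with 4 extended CPUs
	 * and vic_intr_total = 1000, a CPU that has fielded 260
	 * interrupts gets
	 *	weight = ((260*4 - 1000) >> 4) + 4 = (40 >> 4) + 4 = 6,
	 * so a CPU running ahead of the average is given a higher
	 * priority, steering new interrupts to its quieter siblings */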
#ifdef VOYAGER_DEBUG
	if((vic_tick[cpu] & 0xFFF) == 0) {
		/* print this message roughly every 25 secs */
		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
		       cpu, vic_tick[cpu], weight);
	}
#endif
}
/* setup the profiling timer */
int
setup_profiling_timer(unsigned int multiplier)
{
	int i;

	if (!multiplier)
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU. CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting.
	 */
	for (i = 0; i < NR_CPUS; ++i)
		per_cpu(prof_multiplier, i) = multiplier;

	return 0;
}
/* This is a bit of a mess, but forced on us by the genirq changes
 * there's no genirq handler that really does what voyager wants
 * so hack it up with the simple IRQ handler */
static void fastcall
handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
	before_handle_vic_irq(irq);
	handle_simple_irq(irq, desc);
	after_handle_vic_irq(irq);
}
/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * in smp_intr_init */
#define VIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
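/* e.g. VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt) expands to
 * set_intr_gate(VIC_CPI_LEVEL0 + VIC_DEFAULT_CPI_BASE, vic_cpi_interrupt):
 * the CPI number is offset into the vector range reserved for the
 * VIC before the IDT gate is installed */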
void __init
smp_intr_init(void)
{
	int i;

	/* initialize the per cpu irq mask to all disabled */
	for(i = 0; i < NR_CPUS; i++)
		vic_irq_mask[i] = 0xFFFF;

	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

	VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
	VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

	QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
	QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

	/* now put the VIC descriptor into the first 48 IRQs
	 *
	 * This is for later: first 16 correspond to PC IRQs; next 16
	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
	for(i = 0; i < 48; i++)
		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}
/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void
send_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;
	__u32 quad_cpuset = (cpuset & voyager_quad_processors);

	if(cpi < VIC_START_FAKE_CPI) {
		/* fake CPI are only used for booting, so send to the
		 * extended quads as well---Quads must be VIC booted */
		outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
		return;
	}
	if(quad_cpuset)
		send_QIC_CPI(quad_cpuset, cpi);
	cpuset &= ~quad_cpuset;
	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
	if(cpuset == 0)
		return;
	for_each_online_cpu(cpu) {
		if(cpuset & (1<<cpu))
			set_bit(cpi, &vic_cpi_mailbox[cpu]);
	}
	if(cpuset)
		outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}
/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 * */
static int
ack_QIC_CPI(__u8 cpi) {
	__u8 cpu = hard_smp_processor_id();

	cpi &= 7;

	outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}
static void
ack_special_QIC_CPI(__u8 cpi)
{
	switch(cpi) {
	case VIC_CMN_INT:
		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
		break;
	case VIC_SYS_INT:
		outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
		break;
	}
	/* also clear at the VIC, just in case (nop for non-extended proc) */
	ack_VIC_CPI(cpi);
}
/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void
ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
	unsigned long flags;
	__u16 isr;
	__u8 cpu = smp_processor_id();

	local_irq_save(flags);
	isr = vic_read_isr();
	if((isr & (1<<(cpi &7))) == 0) {
		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
	}
#endif
	/* send specific EOI; the two system interrupts have
	 * bit 4 set for a separate vector but behave as the
	 * corresponding 3 bit intr */
	outb_p(0x60|(cpi & 7),0x20);

#ifdef VOYAGER_DEBUG
	if((vic_read_isr() & (1<<(cpi &7))) != 0) {
		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
	}
	local_irq_restore(flags);
#endif
}
/* cribbed with thanks from irq.c */
#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))
static unsigned int
startup_vic_irq(unsigned int irq)
{
	unmask_vic_irq(irq);

	return 0;
}
/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly.  */
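/* Illustrative sequence for the three rules above: disable_irq(9)
 * merely marks IRQ 9 IRQ_DISABLED; if IRQ 9 then arrives on CPU 2,
 * before_handle_vic_irq() raises CPU 2's local 8259 mask and the
 * handler body is skipped; enable_irq(9) lowers the local mask and
 * broadcasts VIC_ENABLE_IRQ_CPI so the other CPUs lower theirs */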
static void
unmask_vic_irq(unsigned int irq)
{
	/* linux doesn't do processor-irq affinity, so enable on
	 * all CPUs we know about */
	int cpu = smp_processor_id(), real_cpu;
	__u16 mask = (1<<irq);
	__u32 processorList = 0;
	unsigned long flags;

	VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
		irq, cpu, cpu_irq_affinity[cpu]));
	spin_lock_irqsave(&vic_irq_lock, flags);
	for_each_online_cpu(real_cpu) {
		if(!(voyager_extended_vic_processors & (1<<real_cpu)))
			continue;
		if(!(cpu_irq_affinity[real_cpu] & mask)) {
			/* irq has no affinity for this CPU, ignore */
			continue;
		}
		if(real_cpu == cpu) {
			enable_local_vic_irq(irq);
		}
		else if(vic_irq_mask[real_cpu] & mask) {
			vic_irq_enable_mask[real_cpu] |= mask;
			processorList |= (1<<real_cpu);
		}
	}
	spin_unlock_irqrestore(&vic_irq_lock, flags);
	if(processorList)
		send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}
static void
mask_vic_irq(unsigned int irq)
{
	/* lazy disable, do nothing */
}
static void
enable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = ~(1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	vic_irq_mask[cpu] &= mask;
	if(vic_irq_mask[cpu] == old_mask)
		return;

	VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu),0xA1);
		(void)inb_p(0xA1);
	}
	else {
		outb_p(cached_21(cpu),0x21);
		(void)inb_p(0x21);
	}
}
static void
disable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = (1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	if(irq == 7)
		return;

	vic_irq_mask[cpu] |= mask;
	if(old_mask == vic_irq_mask[cpu])
		return;

	VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu),0xA1);
		(void)inb_p(0xA1);
	}
	else {
		outb_p(cached_21(cpu),0x21);
		(void)inb_p(0x21);
	}
}
/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void
before_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	__u8 cpu = smp_processor_id();

	_raw_spin_lock(&vic_irq_lock);
	vic_intr_total++;
	vic_intr_count[cpu]++;

	if(!(cpu_irq_affinity[cpu] & (1<<irq))) {
		/* The irq is not in our affinity mask, push it off
		 * onto another CPU */
		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
			irq, cpu));
		disable_local_vic_irq(irq);
		/* set IRQ_INPROGRESS to prevent the handler in irq.c from
		 * actually calling the interrupt routine */
		desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
	} else if(desc->status & IRQ_DISABLED) {
		/* Damn, the interrupt actually arrived, do the lazy
		 * disable thing. The interrupt routine in irq.c will
		 * not handle a IRQ_DISABLED interrupt, so nothing more
		 * need be done here */
		VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
			irq, cpu));
		disable_local_vic_irq(irq);
		desc->status |= IRQ_REPLAY;
	} else {
		desc->status &= ~IRQ_REPLAY;
	}

	_raw_spin_unlock(&vic_irq_lock);
}
/* Finish the VIC interrupt: basically mask */
static void
after_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	_raw_spin_lock(&vic_irq_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
		__u16 isr;
#endif

		desc->status = status;
		if ((status & IRQ_DISABLED))
			disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
		/* DEBUG: before we ack, check what's in progress */
		isr = vic_read_isr();
		if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) {
			__u8 cpu = smp_processor_id();
			__u8 real_cpu;
			int mask; /* Um... initialize me??? --RR */

			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
			       cpu, irq);
			for_each_possible_cpu(real_cpu, mask) {

				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
				     VIC_PROCESSOR_ID);
				isr = vic_read_isr();
				if(isr & (1<<irq)) {
					printk("VOYAGER SMP: CPU%d ack irq %d\n",
					       real_cpu, irq);
					ack_vic_irq(irq);
				}
				outb(cpu, VIC_PROCESSOR_ID);
			}
		}
#endif /* VOYAGER_DEBUG */
		/* as soon as we ack, the interrupt is eligible for
		 * receipt by another CPU so everything must be in
		 * order here  */
		ack_vic_irq(irq);
		if(status & IRQ_REPLAY) {
			/* replay is set if we disable the interrupt
			 * in the before_handle_vic_irq() routine, so
			 * clear the in progress bit here to allow the
			 * next CPU to handle this correctly */
			desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
		}
#ifdef VOYAGER_DEBUG
		isr = vic_read_isr();
		if((isr & (1<<irq)) != 0)
			printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
			       irq, isr);
#endif /* VOYAGER_DEBUG */
	}
	_raw_spin_unlock(&vic_irq_lock);

	/* All code after this point is out of the main path - the IRQ
	 * may be intercepted by another CPU if reasserted */
}
/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */
static void
set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
	/* Only extended processors handle interrupts */
	unsigned long real_mask;
	unsigned long irq_mask = 1 << irq;
	int cpu;

	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

	if(cpus_addr(mask)[0] == 0)
		/* can't have no CPUs to accept the interrupt -- extremely
		 * bad things will happen */
		return;

	if(irq == 0)
		/* can't change the affinity of the timer IRQ.  This
		 * is due to the constraint in the voyager
		 * architecture that the CPI also comes in on an IRQ
		 * line and we have chosen IRQ0 for this.  If you
		 * raise the mask on this interrupt, the processor
		 * will no-longer be able to accept VIC CPIs */
		return;

	if(irq >= 32)
		/* You can only have 32 interrupts in a voyager system
		 * (and 32 only if you have a secondary microchannel
		 * bus) */
		return;

	for_each_online_cpu(cpu) {
		unsigned long cpu_mask = 1 << cpu;

		if(cpu_mask & real_mask) {
			/* enable the interrupt for this cpu */
			cpu_irq_affinity[cpu] |= irq_mask;
		} else {
			/* disable the interrupt for this cpu */
			cpu_irq_affinity[cpu] &= ~irq_mask;
		}
	}
	/* this is magic, we now have the correct affinity maps, so
	 * enable the interrupt.  This will send an enable CPI to
	 * those CPUs who need to enable it in their local masks,
	 * causing them to correct for the new affinity.  If the
	 * interrupt is currently globally disabled, it will simply be
	 * disabled again as it comes in (voyager lazy disable).  If
	 * the affinity map is tightened to disable the interrupt on a
	 * cpu, it will be pushed off when it comes in */
	unmask_vic_irq(irq);
}
static void
ack_vic_irq(unsigned int irq)
{
	if (irq & 8) {
		outb(0x62,0x20);	/* Specific EOI to cascade */
		outb(0x60|(irq & 7),0xA0);
	} else {
		outb(0x60 | (irq & 7),0x20);
	}
}
/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static void __init
vic_enable_cpi(void)
{
	__u8 cpu = smp_processor_id();

	/* just take a copy of the current mask (nop for boot cpu) */
	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

	enable_local_vic_irq(VIC_CPI_LEVEL0);
	enable_local_vic_irq(VIC_CPI_LEVEL1);
	/* for sys int and cmn int */
	enable_local_vic_irq(7);

	if(is_cpu_quad()) {
		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
			cpu, QIC_CPI_ENABLE));
	}

	VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
		cpu, vic_irq_mask[cpu]));
}
void
voyager_smp_dump(void)
{
	int old_cpu = smp_processor_id(), cpu;

	/* dump the interrupt masks of each processor */
	for_each_online_cpu(cpu) {
		__u16 imr, isr, irr;
		unsigned long flags;

		local_irq_save(flags);
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		imr = (inb(0xa1) << 8) | inb(0x21);
		outb(0x0a, 0xa0);
		irr = inb(0xa0) << 8;
		outb(0x0a, 0x20);
		irr |= inb(0x20);
		outb(0x0b, 0xa0);
		isr = inb(0xa0) << 8;
		outb(0x0b, 0x20);
		isr |= inb(0x20);
		outb(old_cpu, VIC_PROCESSOR_ID);
		local_irq_restore(flags);
		printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
		       cpu, vic_irq_mask[cpu], imr, irr, isr);
#if 0
		/* These lines are put in to try to unstick an un ack'd irq */
		if(isr != 0) {
			int irq;
			for(irq=0; irq<16; irq++) {
				if(isr & (1<<irq)) {
					printk("\tCPU%d: ack irq %d\n",
					       cpu, irq);
					local_irq_save(flags);
					outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
					     VIC_PROCESSOR_ID);
					ack_vic_irq(irq);
					outb(old_cpu, VIC_PROCESSOR_ID);
					local_irq_restore(flags);
				}
			}
		}
#endif
	}
}
static void
smp_voyager_power_off(void *dummy)
{
	if(smp_processor_id() == boot_cpu_id)
		voyager_power_off();
	else
		smp_stop_cpu_function(NULL);
}
static void __init
voyager_smp_prepare_cpus(unsigned int max_cpus)
{
	/* FIXME: ignore max_cpus for now */
	smp_boot_cpus();
}
static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
	init_gdt(smp_processor_id());
	switch_to_new_gdt();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	cpu_set(smp_processor_id(), cpu_present_map);
}
static int __cpuinit
voyager_cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask))
		return -ENOSYS;

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map))
		return -EIO;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}
static void __init
voyager_smp_cpus_done(unsigned int max_cpus)
{
	zap_low_mappings();
}
void __init
smp_setup_processor_id(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
	x86_write_percpu(cpu_number, hard_smp_processor_id());
}
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
	.smp_prepare_cpus = voyager_smp_prepare_cpus,
	.cpu_up = voyager_cpu_up,
	.smp_cpus_done = voyager_smp_cpus_done,

	.smp_send_stop = voyager_smp_send_stop,
	.smp_send_reschedule = voyager_smp_send_reschedule,
	.smp_call_function_mask = voyager_smp_call_function_mask,
};