/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 * the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>	Do loops_per_jiffy
 * calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 * & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *  scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *		smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
				     shadow_flush_counts);
#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_CALL_FUNC_SINGLE	2
#define IPI_KDUMP_CPU_STOP	3
/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);
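
/*
 * How ipi_operation is used (a summary of the code below): a sender
 * sets the bit for its IPI_* opcode in the target CPU's word with
 * set_bit() and raises a single hardware interrupt through
 * platform_send_ipi(); handle_IPI() then atomically swaps the word to
 * zero with xchg() and services every opcode bit that was set, so
 * several concurrent senders can share one physical interrupt.
 */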
extern void cpu_halt (void);
static void
stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
}
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(const struct cpumask *mask, int op)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}
#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}
void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;

	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif
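
/*
 * kdump_status[] (declared elsewhere in the kdump support code) is
 * presumably set non-zero by each CPU as it freezes in response to
 * IPI_KDUMP_CPU_STOP; a CPU still showing 0 here did not take the
 * normal interrupt, so it is forced into the crash path with an
 * unmaskable INIT (IA64_IPI_DM_INIT) instead.
 */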
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}
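
/*
 * The receiving side lives outside this file: the interrupt path is
 * expected to dispatch IA64_IPI_LOCAL_TLB_FLUSH to smp_local_flush_tlb()
 * below, which bumps the counter that smp_flush_tlb_cpumask() polls.
 */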
void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}
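
/*
 * Sketch of the lost update a plain "count++" would risk: the
 * load/increment/store sequence is not a single atomic operation, so a
 * second writer can slip in between the load and the store:
 *
 *	writer 1: load   r  = count		(r  == N)
 *	writer 2: load   r' = count		(r' == N)
 *	writer 2: store  count = N + 1
 *	writer 1: store  count = N + 1		(second increment lost)
 *
 * ia64_fetchadd does the whole read-modify-write in one atomic
 * instruction, so every completed flush moves the counter that the
 * waiters in smp_flush_tlb_cpumask() poll.
 */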
#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */
void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu_mask(cpu, cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

	mb();
	for_each_cpu_mask(cpu, cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu_mask(cpu, cpumask)
		while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
			udelay(FLUSH_DELAY);

	preempt_enable();
}
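
/*
 * The completion test works modulo 2^16 (hence the "& 0xffff"): the
 * shadow copy is an unsigned short, and a flush is detected as "the low
 * 16 bits differ from the snapshot", which is wrap-safe provided fewer
 * than 65536 flushes occur on a CPU while a waiter is spinning on it.
 */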
void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}
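
/*
 * Note the cast above: local_flush_tlb_all() takes no arguments, but
 * on_each_cpu() wants a void (*)(void *) callback, so the function is
 * cast and the unused argument is passed as NULL.  smp_flush_tlb_mm()
 * below plays the same trick with local_finish_flush_tlb_mm().
 */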
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	cpumask_var_t cpus;

	preempt_disable();
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}
	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
			mm, 1);
	} else {
		cpumask_copy(cpus, mm_cpumask(mm));
		smp_call_function_many(cpus,
			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
		free_cpumask_var(cpus);
	}
	local_irq_disable();
	local_finish_flush_tlb_mm(mm);
	local_irq_enable();
	preempt_enable();
}
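
/*
 * Rationale for the fast path above, as the single-threaded fork()
 * comment implies: with mm_users == 1 and mm being this CPU's
 * active_mm, the current task should be the only user of the address
 * space, so a purely local flush is presumably sufficient and the
 * cross-call machinery can be skipped entirely.
 */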
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}
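
/*
 * These two hooks are the architecture half of the generic cross-call
 * code in kernel/smp.c: smp_call_function_single() and friends queue
 * the request on the target CPU and then invoke one of these hooks;
 * handle_IPI() above decodes IPI_CALL_FUNC / IPI_CALL_FUNC_SINGLE and
 * hands control back to generic_smp_call_function_interrupt() or
 * generic_smp_call_function_single_interrupt() to run the queued work.
 */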
/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}
int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}