/* SMP support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/cacheflush.h>

static unsigned long sleep_mode[NR_CPUS];

static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Debug Message function
 */
#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

/* timeout value in msec for smp_nmi_call_function().  zero means no timeout. */
#define CALL_FUNCTION_NMI_IPI_TIMEOUT	0

/*
 * Structure and data for smp_nmi_call_function().
 */
struct nmi_call_data_struct {
	smp_call_func_t	func;
	void		*info;
	cpumask_t	started;
	cpumask_t	finished;
	int		wait;
	char		size_alignment[0]
	__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
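
/*
 * The zero-length size_alignment member pads this structure out to a whole
 * number of cache lines, and the structure itself is cache-line aligned, so
 * the started/finished masks that other CPUs clear bits in during an NMI
 * call never share a cache line with unrelated data that the initiating CPU
 * might be touching at the same time.
 */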
static DEFINE_SPINLOCK(smp_nmi_call_lock);
static struct nmi_call_data_struct *nmi_call_data;

/*
 * Data structures and variables
 */
static cpumask_t cpu_callin_map;	/* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map;	/* Bitmask of callout CPUs */
cpumask_t cpu_boot_map;			/* Bitmask of boot APs */
unsigned long start_stack[NR_CPUS - 1];

/*
 * Per CPU parameters
 */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;			/* The count of booted CPUs */
static cpumask_t smp_commenced_mask;
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/*
 * Function Prototypes
 */
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);

/*
 * IPI interrupt definitions
 */
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_chip_disable(struct irq_data *d);
static void mn10300_ipi_chip_enable(struct irq_data *d);
static void mn10300_ipi_ack(struct irq_data *d);
static void mn10300_ipi_nop(struct irq_data *d);

static struct irq_chip mn10300_ipi_type = {
	.name		= "cpu_ipi",
	.irq_disable	= mn10300_ipi_chip_disable,
	.irq_enable	= mn10300_ipi_chip_enable,
	.irq_ack	= mn10300_ipi_ack,
	.irq_eoi	= mn10300_ipi_nop
};

static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);

static struct irqaction reschedule_ipi = {
	.handler	= smp_reschedule_interrupt,
	.name		= "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
	.handler	= smp_call_function_interrupt,
	.name		= "smp call function IPI"
};

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
	.handler	= smp_ipi_timer_interrupt,
	.flags		= IRQF_DISABLED,
	.name		= "smp local timer IPI"
};
#endif

/**
 * init_ipi - Initialise the IPI mechanism
 */
static void init_ipi(void)
{
	unsigned long flags;
	u16 tmp16;

	/* set up the reschedule IPI */
	irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
	mn10300_ipi_enable(RESCHEDULE_IPI);

	/* set up the call function IPI */
	irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
	mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
	/* set up the cache flush IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
			mn10300_low_ipi_handler);
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
	arch_local_irq_restore(flags);
#endif

	/* set up the NMI call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);	/* flush the write buffer */
	arch_local_irq_restore(flags);

	/* set up the SMP boot IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
			mn10300_low_ipi_handler);
	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
	tmp = GxICR(irq);	/* read back to flush the write buffer */

	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
	tmp = GxICR(irq);	/* read back to flush the write buffer */

	arch_local_irq_restore(flags);
}

static void mn10300_ipi_chip_enable(struct irq_data *d)
{
	mn10300_ipi_enable(d->irq);
}

/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = tmp & GxICR_LEVEL;
	tmp = GxICR(irq);	/* read back to flush the write buffer */

	arch_local_irq_restore(flags);
}

static void mn10300_ipi_chip_disable(struct irq_data *d)
{
	mn10300_ipi_disable(d->irq);
}

/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @d: The IRQ data of the IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.
 */
static void mn10300_ipi_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);	/* read back to flush the write buffer */
	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_nop - Dummy IPI action
 * @d: The IRQ data of the IPI to be acted upon.
 */
static void mn10300_ipi_nop(struct irq_data *d)
{
}

/**
 * send_IPI_mask - Send IPIs to all CPUs in list
 * @cpumask: The list of CPUs to target.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 * finish before returning.  The caller is responsible for synchronisation if
 * that is needed.
 */
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
	int i;
	u16 tmp;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpumask_test_cpu(i, cpumask)) {
			/* send IPI */
			tmp = CROSS_GxICR(irq, i);
			CROSS_GxICR(irq, i) =
				tmp | GxICR_REQUEST | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, i); /* flush write buffer */
		}
	}
}
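
/*
 * Illustrative sketch (not part of the original source): sending a
 * reschedule IPI to, say, CPU 1 is a single call:
 *
 *	send_IPI_mask(cpumask_of(1), RESCHEDULE_IPI);
 *
 * The read-modify-write of CROSS_GxICR() above sets the request bit in the
 * target CPU's interrupt control register, and the trailing read forces the
 * store out of this CPU's write buffer before the loop moves on to the next
 * target.
 */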
/**
 * send_IPI_self - Send an IPI to this CPU.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to the current CPU.
 */
void send_IPI_self(int irq)
{
	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}

/**
 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all CPUs in the system barring the current one,
 * not waiting for them to finish before returning.  The caller is responsible
 * for synchronisation if that is needed.
 */
void send_IPI_allbutself(int irq)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	send_IPI_mask(&cpumask, irq);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	BUG();
	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}

/**
 * smp_send_reschedule - Send a reschedule IPI to a CPU
 * @cpu: The CPU to target.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}

/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until function is run on all CPUs.
 *
 * Send a non-maskable request to all CPUs in the system, requesting them to
 * run the specified function with the given context data, and, potentially,
 * to wait for completion of that function on all CPUs.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
 * timeout.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
	struct nmi_call_data_struct data;
	unsigned long flags;
	unsigned int cnt;
	int cpus, ret = 0;

	cpus = num_online_cpus() - 1;
	if (cpus < 1)
		return 0;

	data.func = func;
	data.info = info;
	cpumask_copy(&data.started, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &data.started);
	data.wait = wait;
	if (wait)
		data.finished = data.started;

	spin_lock_irqsave(&smp_nmi_call_lock, flags);
	nmi_call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

	/* Wait for response */
	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
		for (cnt = 0;
		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
			     !cpumask_empty(&data.started);
		     cnt++)
			mdelay(1);

		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
			for (cnt = 0;
			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
				     !cpumask_empty(&data.finished);
			     cnt++)
				mdelay(1);
		}

		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
			ret = -ETIMEDOUT;

	} else {
		/* If the timeout value is zero, wait until the cpumask has
		 * been cleared */
		while (!cpumask_empty(&data.started))
			barrier();
		if (wait)
			while (!cpumask_empty(&data.finished))
				barrier();
	}

	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
	return ret;
}
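
/*
 * Usage sketch (illustrative only; dump_my_state is a hypothetical helper,
 * not part of this file): smp_send_stop() below is the in-file user of this
 * interface, and a debugging aid could likewise run a routine on every other
 * CPU and wait for it to complete:
 *
 *	static void dump_my_state(void *info)
 *	{
 *		// must be NMI-safe: runs even on CPUs that have
 *		// maskable interrupts disabled
 *	}
 *
 *	smp_nmi_call_function(dump_my_state, NULL, 1);
 *
 * Note that with CALL_FUNCTION_NMI_IPI_TIMEOUT set to zero (the default
 * above), the initiator spins indefinitely until every targeted CPU has
 * responded.
 */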
/**
 * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
 *
 * Send a non-maskable request to all other CPUs in the system, instructing
 * them to jump into the debugger.  The caller is responsible for checking
 * that the other CPUs responded to the instruction.
 *
 * The caller should make sure that this CPU's debugger IPI is disabled.
 */
void smp_jump_to_debugger(void)
{
	if (num_online_cpus() > 1)
		/* Send a message to all other CPUs */
		send_IPI_allbutself(DEBUGGER_NMI_IPI);
}

/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 */
void stop_this_cpu(void *unused)
{
	static volatile int stopflag;
	unsigned long flags;

#ifdef CONFIG_GDBSTUB
	/* In case another CPU is single-stepping smp_send_stop, clear
	 * procindebug to avoid deadlock.
	 */
	atomic_set(&procindebug[smp_processor_id()], 0);
#endif	/* CONFIG_GDBSTUB */

	flags = arch_local_cli_save();
	set_cpu_online(smp_processor_id(), false);

	while (!stopflag)
		cpu_relax();

	set_cpu_online(smp_processor_id(), true);
	arch_local_irq_restore(flags);
}

/**
 * smp_send_stop - Send a stop request to all CPUs.
 */
void smp_send_stop(void)
{
	smp_nmi_call_function(stop_this_cpu, NULL, 0);
}

/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	/* generic_smp_call_function_interrupt(); */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 */
void smp_nmi_call_function_interrupt(void)
{
	smp_call_func_t func = nmi_call_data->func;
	void *info = nmi_call_data->info;
	int wait = nmi_call_data->wait;

	/* Notify the initiating CPU that I've grabbed the data and am about
	 * to execute the function
	 */
	smp_mb();
	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
	(*func)(info);

	if (wait) {
		smp_mb();
		cpumask_clear_cpu(smp_processor_id(),
				  &nmi_call_data->finished);
	}
}
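
/*
 * The barrier pairing above matters: the smp_mb() before clearing a bit in
 * ->started orders this CPU's reads of nmi_call_data ahead of the flag
 * clear, and the smp_mb() before clearing ->finished orders the stores made
 * by func ahead of it.  By the time the initiator in smp_nmi_call_function()
 * sees one of its masks go empty, the corresponding work is therefore
 * visible to it.
 */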
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
	return local_timer_interrupt();
}
#endif

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		set_cpu_possible(i, true);
		set_cpu_present(i, true);
	}
}

/**
 * smp_cpu_init - Initialise an AP in start_secondary.
 *
 * For this Application Processor, set up init_mm, initialise the FPU and set
 * up the interrupt level 0-6 settings.
 */
static void __init smp_cpu_init(void)
{
	unsigned long flags;
	int cpu_id = smp_processor_id();
	u16 tmp16;

	if (cpumask_test_and_set_cpu(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for (;;)
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	enter_lazy_tlb(&init_mm, current);

	/* Force FPU initialization */
	clear_using_fpu(current);

	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(LOCAL_TIMER_IPI);

	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

	mn10300_ipi_shutdown(SMP_BOOT_IRQ);

	/* Set up the non-maskable call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);	/* flush the write buffer */
	arch_local_irq_restore(flags);
}

/**
 * smp_prepare_cpu_init - Initialise a CPU in startup_secondary
 *
 * Set up the interrupt level 0-6 settings and initialise the ICR of the
 * kernel debugger.
 */
void smp_prepare_cpu_init(void)
{
	int loop;

	/* Set the interrupt vector registers */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* Disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_KERNEL_DEBUGGER
	/* initialise the kernel debugger interrupt */
	do {
		unsigned long flags;
		u16 tmp16;

		flags = arch_local_cli_save();
		GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
		tmp16 = GxICR(DEBUGGER_NMI_IPI);
		arch_local_irq_restore(flags);
	} while (0);
#endif
}

/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 */
int __init start_secondary(void *unused)
{
	smp_cpu_init();
	smp_callin();
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	local_flush_tlb();
	preempt_disable();
	smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	init_clockevents();
#endif
	cpu_idle();
	return 0;
}

/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu() to boot up the APs.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* Set up the boot CPU's information */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* If SMP should be disabled, then finish */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* Boot the secondary CPUs (those for which phy_id > 0) */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* Don't boot the primary CPU */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}

/**
 * smp_store_cpu_info - Save a CPU's information
 * @cpu: The CPU to save for.
 *
 * Save boot_cpu_data and jiffy for the specified CPU.
 */
static void __init smp_store_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	*ci = boot_cpu_data;
	ci->loops_per_jiffy = loops_per_jiffy;
	ci->type = CPUREV;
}

/**
 * smp_tune_scheduling - Set the time slice value
 *
 * Nothing to do here.
 */
static void __init smp_tune_scheduling(void)
{
}

/**
 * do_boot_cpu - Boot up one CPU
 * @phy_id: Physical ID of the CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;

	cpucount++;

	/* Create an idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send the boot IPI to the AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait up to 100ms for the AP to receive the IPI */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow the AP to start initializing */
		cpumask_set_cpu(cpu_id, &cpu_callout_map);

		/* Wait for the AP to set cpu_callin_map */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpumask_test_cpu(cpu_id,
							 &cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	if (send_status == GxICR_REQUEST || callin_status == 0) {
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}

/**
 * smp_show_cpu_info - Show SMP CPU information
 * @cpu: The CPU of interest.
 */
static void __init smp_show_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	printk(KERN_INFO
	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
	       cpu,
	       MN10300_IOCLK / 1000000,
	       (MN10300_IOCLK / 10000) % 100,
	       ci->loops_per_jiffy / (500000 / HZ),
	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}
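
/*
 * The BogoMIPS arithmetic above works as follows: BogoMIPS is defined as
 * loops_per_jiffy * HZ * 2 / 1,000,000, i.e. millions of delay-loop
 * iterations per second.  Dividing loops_per_jiffy by (500000 / HZ) yields
 * the integer part, and dividing by (5000 / HZ) modulo 100 yields the
 * two-digit fractional part, all in integer arithmetic.  For example
 * (illustrative numbers only), with HZ = 100 and loops_per_jiffy = 12500,
 * this prints "2.50": 12500 / 5000 = 2 and (12500 / 50) % 100 = 50.
 */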
/**
 * smp_callin - Set cpu_callin_map for the current CPU ID
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait for AP startup, 2s total */
	while (time_before(jiffies, timeout)) {
		if (cpumask_test_cpu(cpu, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();		/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpumask_set_cpu(cpu, &cpu_callin_map);
}

/**
 * smp_online - Set cpu_online_mask
 */
static void __init smp_online(void)
{
	int cpu;

	cpu = smp_processor_id();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();
}

/**
 * smp_cpus_done - Final part of SMP bring-up
 * @max_cpus: Maximum CPU count.
 *
 * Do nothing.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 *
 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
 * processor (CPU 0).
 */
void __devinit smp_prepare_boot_cpu(void)
{
	cpumask_set_cpu(0, &cpu_callout_map);
	cpumask_set_cpu(0, &cpu_callin_map);
	current_thread_info()->cpu = 0;
}

/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set the SP register and jump to the thread's PC address.
 */
void initialize_secondary(void)
{
	asm volatile (
		"mov	%0,sp	\n"
		"jmp	(%1)	\n"
		:
		: "a"(current->thread.sp), "a"(current->thread.pc));
}

/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 */
int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	if (num_online_cpus() == 1)
		disable_hlt();
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpumask_set_cpu(cpu, &smp_commenced_mask);

	/* Wait 5s total for a response */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpu_online(cpu))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_online(cpu));
	return 0;
}

/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier: The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*
 * CPU hotplug routines
 */
#ifdef CONFIG_HOTPLUG_CPU

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	migrate_irqs();
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}

#ifdef CONFIG_MN10300_CACHE_ENABLED
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	asm volatile(
		/* disable the instruction and data caches ... */
		"	movhu	(%1),%0	\n"
		"	and	%2,%0	\n"
		"	movhu	%0,(%1)	\n"
		/* ... then wait for any operation in progress to finish */
		"1:	movhu	(%1),%0	\n"
		"	btst	%3,%0	\n"
		"	bne	1b	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}

static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	asm volatile(
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}

static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	asm volatile (
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}

#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache()	do {} while (0)
#define hotplug_cpu_enable_cache()	do {} while (0)
#define hotplug_cpu_invalidate_cache()	do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */

/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);

	do {
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpumask_empty(&nmi_call_func_mask_data.started));

	if (wait) {
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}
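
/*
 * Unlike smp_nmi_call_function(), the hotplug variant above flushes and
 * invalidates the data cache around every access to the shared call data:
 * the target CPU may be running with its caches disabled while going into
 * or coming out of sleep (see prepare_sleep_cpu() below), so coherency for
 * this handshake has to be maintained explicitly by the initiator.
 */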
static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &cpu_callin_map);
	local_flush_tlb();
	set_cpu_online(cpu, true);
	smp_wmb();
}

static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}

/* when this function is called, IE=0, NMID=0. */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();

	/*
	 * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
	 * before this CPU goes into SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);
	restart_wakeup_cpu();
}

static void run_sleep_cpu(unsigned int cpu)
{
	unsigned long flags;
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpumask_of(cpu));
	flags = arch_local_cli_save();
	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
	udelay(1);		/* delay for the CPU to sleep. */
	arch_local_irq_restore(flags);
}

static void wakeup_cpu(void *unused)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();
	sleep_mode[smp_processor_id()] = 0;
}

static void run_wakeup_cpu(unsigned int cpu)
{
	unsigned long flags;
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpumask_of(cpu));
	flags = arch_local_cli_save();
#if NR_CPUS == 2
	mn10300_local_dcache_flush_inv();
#else
	/*
	 * Before waking up the CPU, all online CPUs should stop and flush
	 * the D-cache for global data.
	 */
#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
#endif
	hotplug_cpu_nmi_call_function(cpumask, wakeup_cpu, NULL, 1);
	arch_local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG_CPU */