/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
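
/*
 * smp_cross_call() is the hook used to raise an IPI on a set of CPUs.
 * It stays NULL until the platform's interrupt controller driver
 * registers an implementation via set_smp_cross_call() below.
 */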
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);
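
/*
 * Boot handshake: boot_secondary() publishes the target CPU number in
 * secondary_release and kicks the core with IPI_WAKEUP; the secondary's
 * low-level startup code (presumably spinning in head.S) waits until it
 * sees its own CPU number there before entering secondary_start_kernel().
 */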
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);
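
/*
 * cpu_running is completed by the new CPU from secondary_start_kernel();
 * __cpu_up() gives up on the CPU if it has not checked in within one
 * second.
 */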
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}
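
/*
 * synchronise_count_master() pairs with synchronise_count_slave() in
 * secondary_start_kernel() to bring the new CPU's timer counter in step
 * with the boot CPU's (implemented in the arch's timer-sync code).
 */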
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
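
/*
 * handle_IPI() is invoked from the platform's IPI interrupt handler with
 * the message id that the sending CPU passed to smp_cross_call().
 */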
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
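
/*
 * Typical registration, done by the OpenRISC ompic irqchip driver during
 * its init (a sketch, not part of this file):
 *
 *	set_smp_cross_call(ompic_raise_softirq);
 *
 * where ompic_raise_softirq() writes the IPI message into each target
 * core's mailbox register and raises its interrupt.
 */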
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}
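
/*
 * The two arch_send_call_function_* hooks above are the entry points the
 * generic kernel/smp.c cross-call code (smp_call_function() and friends)
 * uses to reach other CPUs.
 */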
/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
/*
 * FIXME: implement proper functionality instead of flush_tlb_all.
 * *But*, as things currently stand, the local_flush_tlb_* functions will
 * all boil down to local_flush_tlb_all anyway.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);