/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

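/*
 * Wake up one secondary CPU: publish its CPU number in secondary_release
 * and send it an IPI_WAKEUP cross call while holding boot_lock.
 */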
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

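/*
 * Bring up CPU 'cpu' using the idle task 'idle'.  The boot CPU hands the
 * idle thread and kernel page tables to the secondary, kicks it with
 * boot_secondary() and then waits (with a one second timeout) for it to
 * signal cpu_running from secondary_start_kernel().
 */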
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

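/*
 * C entry point for a secondary CPU.  It adopts init_mm, brings up its
 * per-CPU clock event device, lets the boot CPU continue via cpu_running,
 * synchronises counters and marks itself online before entering the
 * idle loop.
 */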
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

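/*
 * IPI dispatch, called by the interrupt controller driver with the
 * message type that was sent via smp_cross_call().
 */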
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

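/*
 * Flush the TLB entries for an mm on every CPU in cmask.  If the local
 * CPU is the only one in the mask the flush is done directly, otherwise
 * ipi_flush_tlb_mm() is run on each CPU in the mask.
 */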
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

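/*
 * Flush a virtual address range on every CPU in cmask.  A range of one
 * page or less is flushed with the cheaper single-page flush; anything
 * larger uses the full range flush.
 */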
static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

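/*
 * The flush_tlb_* entry points called by generic MM code.  Apart from
 * flush_tlb_all(), each limits the shootdown to the CPUs recorded in the
 * mm's cpumask.
 */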
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

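/*
 * Invalidate the instruction cache for a page on all online CPUs.
 */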
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);