// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <linux/profile.h>

#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
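
/*
 * Called from platform setup code to install the board's plat_smp_ops.
 * Only one set of ops is expected; registering a second set overrides
 * the first with a warning.
 */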
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}
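
/*
 * Runs once on the boot CPU before the secondaries are brought up:
 * bind the boot processor to init_mm and give the platform a chance
 * to do board-specific preparation. Without CPU hotplug support,
 * every possible CPU is also marked present here.
 */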
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}
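
/*
 * Record the boot CPU in the physical<->logical maps and mark it
 * online and possible before any secondary bringup runs.
 */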
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
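/*
 * Default "wait for a CPU to die" implementation: poll cpu_state for
 * up to roughly one second (10 x 100ms), waiting for the dying CPU to
 * mark itself CPU_DEAD in play_dead_common() below.
 */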
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}
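
/*
 * Teardown run on the dying CPU itself. The mb() ensures the preceding
 * teardown is visible before cpu_state is set to CPU_DEAD, which is
 * what the smp_rmb() poll in native_cpu_die() is waiting to observe.
 */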
void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}
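
/*
 * Take the current CPU down: give the platform a veto via
 * mp_ops->cpu_disable(), then mark the CPU offline, migrate its IRQs
 * away, flush its caches and TLB, and drop it from the mm cpumask of
 * every task.
 */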
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline. Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif
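
/*
 * First C code executed on a secondary CPU, reached from the low-level
 * startup path in head.S via stack_start.start_kernel_fn. It adopts
 * init_mm, performs per-CPU trap and delay-loop setup, and enters the
 * idle loop; the set_cpu_online() here is what releases the wait loop
 * in __cpu_up() below.
 */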
static asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
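
/*
 * Boot parameter block consumed by the secondary startup code in
 * head.S; the layout here must match the assembly side exactly.
 */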
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;
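
/*
 * Bring one secondary CPU up: hand it its kernel stack and entry point
 * through stack_start, kick it via the platform's start_cpu() hook,
 * then wait up to one second (HZ jiffies) for it to appear in the
 * online mask. flush_icache_range() pushes the freshly written block
 * out of the boot CPU's caches so the secondary reads current values.
 */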
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}
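
/*
 * Report the summed BogoMIPS once all CPUs are up: BogoMIPS equals
 * loops_per_jiffy * HZ / 500000, so bogosum / (500000/HZ) is the
 * integer part and (bogosum / (5000/HZ)) % 100 the two fractional
 * digits.
 */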
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void arch_smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
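/*
 * Timer tick broadcast: when a CPU's local clockevent device stops
 * (e.g. in a deep idle state), the tick core calls tick_broadcast()
 * and the tick is delivered to the affected CPUs as SMP_MSG_TIMER
 * IPIs, handled by ipi_timer() below.
 */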
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
#endif
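
/*
 * IPI demultiplexer, called from the platform's interrupt code with
 * the message type already decoded from the hardware.
 */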
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#endif

#ifdef CONFIG_MMU
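/*
 * SMP TLB shootdown helpers: each flush_tlb_*() variant below performs
 * the flush locally and, where another CPU might hold stale
 * translations, uses IPIs to run the corresponding local_flush_tlb_*()
 * remotely.
 */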

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}
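
/*
 * Argument block for the ranged-flush IPIs. Instances live on the
 * caller's stack, which is safe because smp_call_function() is always
 * invoked with wait=1 here and does not return until every handler
 * has finished with the pointer.
 */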
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}
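
/*
 * Flush a single (ASID, vaddr) translation on every CPU; the addr1 and
 * addr2 fields of flush_tlb_data are reused to carry the ASID and the
 * virtual address.
 */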
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}

#endif