// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <linux/profile.h>

#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
struct plat_smp_ops *mp_ops = NULL;
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
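/*
 * Platform code registers its SMP operations during early setup;
 * everything below dispatches through mp_ops for the machine-specific
 * parts (starting CPUs, sending IPIs, and so on).
 */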
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}
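/*
 * Called once from generic code on the boot CPU before any secondaries
 * are brought up: take over the boot mm, record our CPU number, and give
 * the platform a chance to do its own preparation.
 */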
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}
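/*
 * CPU hotplug support: the dying CPU marks itself CPU_DEAD via
 * play_dead_common(), while the CPU requesting the offline polls for
 * that state in native_cpu_die() below.
 */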
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}
int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}
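/*
 * Final steps on the CPU going down: leave the idle task, tear down the
 * IRQ stacks, and advertise CPU_DEAD so native_cpu_die() can observe it.
 */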
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}
void native_play_dead(void)
{
	play_dead_common();
}
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif
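/*
 * Entry point for secondary CPUs, reached from head.S via
 * stack_start.start_kernel_fn: set up the MMU and init_mm, announce
 * ourselves, and drop into the idle loop.
 */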
static asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;
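/*
 * Bring up one secondary: hand it a stack and entry point through
 * stack_start (consumed by head.S), kick it via the platform op, then
 * wait up to a second for it to mark itself online.
 */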
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
void arch_smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif
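/*
 * All platform IPIs funnel through here; the message number selects the
 * generic handler to run.
 */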
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}
#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif
#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
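/*
 * flush_tlb_range(), flush_tlb_page() and friends below follow the same
 * pattern: IPI every CPU when the mm may be live elsewhere, otherwise
 * just invalidate the remote contexts and flush locally.
 */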
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif /* CONFIG_MMU */