/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);

/* IPI (Inter Processor Interrupt) */

#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler =	ipi_interrupt,
	.flags =	IRQF_PERCPU,
	.name =		"ipi",
};

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);

	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);

	return core_id & 0x3fff;
}
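
/*
 * Worked example (hypothetical register value, for illustration only):
 * with SYSCFGID == 0x000c0001, get_core_count() computes
 * ((0x000c0001 >> 18) & 0xf) + 1 = 3 + 1 = 4 cores, and get_core_id()
 * computes 0x000c0001 & 0x3fff = 1, i.e. this is core 1 of 4.
 */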

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for_each_possible_cpu(i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	if (ncpus > NR_CPUS) {
		ncpus = NR_CPUS;
		pr_info("%s: limiting core count by %d\n", __func__, ncpus);
	}

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_MISC
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			 __func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		 __func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
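
/*
 * The complete(&cpu_running) above pairs with the
 * wait_for_completion_timeout(&cpu_running, ...) in __cpu_up() below:
 * the boot CPU blocks there until the secondary has marked itself
 * online.
 */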

static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}
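
/*
 * mx_cpu_start()/mx_cpu_stop() toggle the per-core RunStall bits in the
 * MX interconnect's MPSCORE register: clearing a core's bit releases it
 * from stall, setting it stalls the core again.  Both helpers are only
 * ever run on CPU 0 via smp_call_function_single() below.
 */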

#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in the cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);

		do {
			/*
			 * Pairs with the first two memws in the
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}
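
/*
 * Start-up handshake, as implemented above: the boot CPU publishes a
 * non-zero ccount in cpu_start_ccount and polls for the secondary's
 * reset code to clear it.  Two successful rounds count as a boot; if the
 * word is still non-zero when the timeout expires, the secondary never
 * ran, so it is stalled again via mx_cpu_stop() and -EIO is returned.
 */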

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
		 __func__, cpu, idle, start_info.stack);

	init_completion(&cpu_running);
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
					       sizeof(cpu_start_id));
		/* Pairs with the second memw in the cpu_restart */
		mb();
		if (READ_ONCE(cpu_start_id) == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}
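
/*
 * The dying CPU's restart code is expected to negate its id into
 * cpu_start_id once it has parked itself; seeing -cpu above therefore
 * means the shutdown handshake completed and the core can be stalled
 * for good via platform_cpu_kill().
 */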

void arch_cpu_idle_dead(void)
{
	cpu_die();
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

static void send_ipi_message(const struct cpumask *callmask,
			     enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}
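
/*
 * Writing the computed core mask to MIPISET(msg_id) latches interrupt
 * msg_id on every selected core; each receiving core then reads and
 * acknowledges its pending set through MIPICAUSE in ipi_interrupt()
 * below.
 */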

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	for (;;) {
		unsigned int msg;

		msg = get_er(MIPICAUSE(cpu));
		set_er(msg, MIPICAUSE(cpu));

		if (!msg)
			break;

		if (msg & (1 << IPI_CALL_FUNC)) {
			++ipi->ipi_count[IPI_CALL_FUNC];
			generic_smp_call_function_interrupt();
		}

		if (msg & (1 << IPI_RESCHEDULE)) {
			++ipi->ipi_count[IPI_RESCHEDULE];
			scheduler_ipi();
		}

		if (msg & (1 << IPI_CPU_STOP)) {
			++ipi->ipi_count[IPI_CPU_STOP];
			ipi_cpu_stop(cpu);
		}
	}

	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
				   per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};
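
/*
 * Each cross-call below fills a flush_data on the caller's stack and
 * hands its address to on_each_cpu().  This is safe because the wait
 * flag is always 1, so the call does not return until every CPU has
 * finished running the IPI handler and no stale pointer survives.
 */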

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);

/* ------------------------------------------------------------------------- */

static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
					   unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
						 unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}