// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;
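
/*
 * CPUs known to be up and coherent with the rest of the system. The IPI send
 * path below consults this mask so that a core which is still powered down
 * can first be brought up via the CPC before it is expected to take an IPI.
 */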

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}
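
/*
 * cpu_foreign_map[cpu] ends up containing one representative VPE from each
 * online core other than cpu's own core, so callers (such as cache
 * maintenance code) can send one IPI per foreign core rather than one per VPE.
 */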

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}
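
/*
 * Send an IPI to every CPU in @mask. When a Cluster Power Controller is
 * present, any targeted CPU that is not yet in cpu_coherent_mask is sent
 * CPC power-up commands until it comes up, so the IPI is not directed at a
 * core that is still powered down.
 */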

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;
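
/*
 * call_virq and sched_virq above hold the base virqs reserved from the IPI
 * domain. With a per-cpu IPI domain each CPU gets its own interrupt at
 * call_virq + cpu and sched_virq + cpu; otherwise a single virq is shared
 * by all CPUs.
 */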

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs so fail
	 * loudly if that is the case. Otherwise simply return, skipping IPI
	 * setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);

	return 0;
}

static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up.
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * may have problems.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
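
/*
 * flush_tlb_range() picks one of three strategies: with MMIDs the hardware
 * broadcasts a global invalidate (ginvt), with the range rounded to even/odd
 * page pairs since each TLB entry maps such a pair; for an mm shared with
 * other tasks the flush is done via IPI on all CPUs; otherwise remote CPUs
 * merely have their ASID for this mm invalidated lazily.
 */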

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID without it appearing to has_valid_asid()
			 * as if the mm had been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
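
/*
 * Kernel mappings are global rather than tagged with an ASID, so a kernel
 * range flush has to run on every CPU including the current one; hence
 * on_each_cpu() rather than smp_on_other_tlbs().
 */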

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without it appearing to
			 * has_valid_asid() as if the mm had been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
	CSD_INIT(tick_broadcast_callee, NULL);
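
/*
 * Each CPU gets its own call_single_data so tick_broadcast() can fire the
 * broadcast to several CPUs asynchronously without waiting for any of them
 * to acknowledge the call.
 */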
698 void tick_broadcast(const struct cpumask
*mask
)
700 call_single_data_t
*csd
;
703 for_each_cpu(cpu
, mask
) {
704 csd
= &per_cpu(tick_broadcast_csd
, cpu
);
705 smp_call_function_single_async(cpu
, csd
);
709 #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */