// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];	/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

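/*
 * The two helpers below record CPU topology as each CPU is set up:
 * cpu_sibling_map groups the VPEs/TCs that share a core, while
 * cpu_core_map groups the CPUs that share a physical package.
 */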
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

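/*
 * cpu_foreign_map[cpu] ends up containing one representative VPE from each
 * online core other than cpu's own, so broadcast cache/TLB maintenance only
 * needs to interrupt a single sibling per foreign core.
 */
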
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

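	/*
	 * A target CPU on another core may still be powered down and unable
	 * to take the IPI: keep poking the Cluster Power Controller until
	 * the target has marked itself coherent.
	 */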
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
			     struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

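/*
 * Base virqs handed back by irq_reserve_ipi(). For per-CPU IPI domains each
 * CPU's interrupt is the base plus its CPU number; otherwise a single virq
 * is shared by every CPU in the mask.
 */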
static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half of a DT setup, so if we found an irq node
	 * but didn't find an ipidomain, look for one that is not described in
	 * the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs so fail
	 * loudly if that is the case. Otherwise simply return, skipping IPI
	 * setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half of a DT setup, so if we found an irq node
	 * but didn't find an ipidomain, look for one that is not described in
	 * the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);

	return 0;
}

static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif /* CONFIG_GENERIC_IRQ_IPI */

/*
 * First C code run on the secondary CPUs after being started up by
 * the boot CPU.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up.
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

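/*
 * Bring a secondary CPU up: mp_ops->boot_secondary() starts it, then we wait
 * (with a timeout) for it to reach start_secondary() and signal cpu_starting,
 * synchronise the cycle counters, and finally wait for cpu_running before
 * returning to the hotplug core.
 */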
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func)(void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func)(void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

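/*
 * flush_tlb_range() picks one of three strategies: with MMIDs a globalized
 * ginvt invalidates the range on all CPUs directly; for an mm that may be
 * live on other CPUs an IPI runs the local flush everywhere; otherwise the
 * other CPUs simply have their context for this mm invalidated so that they
 * allocate a fresh one at their next switch_mm().
 */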
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * ASID without it appearing to has_valid_asid() as if
			 * mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate ASID without it appearing to
			 * has_valid_asid() as if mm has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

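/*
 * tick_broadcast() kicks each target CPU with a pre-registered CSD that
 * calls tick_receive_broadcast(); the per-CPU count avoids queueing the CSD
 * again while a previous broadcast to that CPU is still in flight.
 */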
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */