// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010, 2011, 2012, Lemote, Inc.
 * Author: Chen Huacai, chenhc@lemote.com
 */
8 #include <linux/init.h>
10 #include <linux/sched.h>
11 #include <linux/sched/hotplug.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/smp.h>
14 #include <linux/cpufreq.h>
15 #include <linux/kexec.h>
16 #include <asm/processor.h>
19 #include <asm/tlbflush.h>
20 #include <asm/cacheflush.h>
22 #include <loongson_regs.h>
23 #include <workarounds.h>
27 DEFINE_PER_CPU(int, cpu_state
);
29 #define LS_IPI_IRQ (MIPS_CPU_IRQ_BASE + 6)
31 static void __iomem
*ipi_set0_regs
[16];
32 static void __iomem
*ipi_clear0_regs
[16];
33 static void __iomem
*ipi_status0_regs
[16];
34 static void __iomem
*ipi_en0_regs
[16];
35 static void __iomem
*ipi_mailbox_buf
[16];
37 static u32 (*ipi_read_clear
)(int cpu
);
38 static void (*ipi_write_action
)(int cpu
, u32 action
);
39 static void (*ipi_write_enable
)(int cpu
);
40 static void (*ipi_clear_buf
)(int cpu
);
41 static void (*ipi_write_buf
)(int cpu
, struct task_struct
*idle
);
43 /* send mail via Mail_Send register for 3A4000+ CPU */
44 static void csr_mail_send(uint64_t data
, int cpu
, int mailbox
)
48 /* send high 32 bits */
49 val
= CSR_MAIL_SEND_BLOCK
;
50 val
|= (CSR_MAIL_SEND_BOX_HIGH(mailbox
) << CSR_MAIL_SEND_BOX_SHIFT
);
51 val
|= (cpu
<< CSR_MAIL_SEND_CPU_SHIFT
);
52 val
|= (data
& CSR_MAIL_SEND_H32_MASK
);
53 csr_writeq(val
, LOONGSON_CSR_MAIL_SEND
);
55 /* send low 32 bits */
56 val
= CSR_MAIL_SEND_BLOCK
;
57 val
|= (CSR_MAIL_SEND_BOX_LOW(mailbox
) << CSR_MAIL_SEND_BOX_SHIFT
);
58 val
|= (cpu
<< CSR_MAIL_SEND_CPU_SHIFT
);
59 val
|= (data
<< CSR_MAIL_SEND_BUF_SHIFT
);
60 csr_writeq(val
, LOONGSON_CSR_MAIL_SEND
);
63 static u32
csr_ipi_read_clear(int cpu
)
67 /* Load the ipi register to figure out what we're supposed to do */
68 action
= csr_readl(LOONGSON_CSR_IPI_STATUS
);
69 /* Clear the ipi register to clear the interrupt */
70 csr_writel(action
, LOONGSON_CSR_IPI_CLEAR
);
75 static void csr_ipi_write_action(int cpu
, u32 action
)
79 while ((irq
= ffs(action
))) {
80 uint32_t val
= CSR_IPI_SEND_BLOCK
;
82 val
|= (cpu
<< CSR_IPI_SEND_CPU_SHIFT
);
83 csr_writel(val
, LOONGSON_CSR_IPI_SEND
);
84 action
&= ~BIT(irq
- 1);
88 static void csr_ipi_write_enable(int cpu
)
90 csr_writel(0xffffffff, LOONGSON_CSR_IPI_EN
);
93 static void csr_ipi_clear_buf(int cpu
)
95 csr_writeq(0, LOONGSON_CSR_MAIL_BUF0
);
98 static void csr_ipi_write_buf(int cpu
, struct task_struct
*idle
)
100 unsigned long startargs
[4];
102 /* startargs[] are initial PC, SP and GP for secondary CPU */
103 startargs
[0] = (unsigned long)&smp_bootstrap
;
104 startargs
[1] = (unsigned long)__KSTK_TOS(idle
);
105 startargs
[2] = (unsigned long)task_thread_info(idle
);
108 pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
109 cpu
, startargs
[0], startargs
[1], startargs
[2]);
111 csr_mail_send(startargs
[3], cpu_logical_map(cpu
), 3);
112 csr_mail_send(startargs
[2], cpu_logical_map(cpu
), 2);
113 csr_mail_send(startargs
[1], cpu_logical_map(cpu
), 1);
114 csr_mail_send(startargs
[0], cpu_logical_map(cpu
), 0);
117 static u32
legacy_ipi_read_clear(int cpu
)
121 /* Load the ipi register to figure out what we're supposed to do */
122 action
= readl_relaxed(ipi_status0_regs
[cpu_logical_map(cpu
)]);
123 /* Clear the ipi register to clear the interrupt */
124 writel_relaxed(action
, ipi_clear0_regs
[cpu_logical_map(cpu
)]);
130 static void legacy_ipi_write_action(int cpu
, u32 action
)
132 writel_relaxed((u32
)action
, ipi_set0_regs
[cpu
]);
136 static void legacy_ipi_write_enable(int cpu
)
138 writel_relaxed(0xffffffff, ipi_en0_regs
[cpu_logical_map(cpu
)]);
141 static void legacy_ipi_clear_buf(int cpu
)
143 writeq_relaxed(0, ipi_mailbox_buf
[cpu_logical_map(cpu
)] + 0x0);
146 static void legacy_ipi_write_buf(int cpu
, struct task_struct
*idle
)
148 unsigned long startargs
[4];
150 /* startargs[] are initial PC, SP and GP for secondary CPU */
151 startargs
[0] = (unsigned long)&smp_bootstrap
;
152 startargs
[1] = (unsigned long)__KSTK_TOS(idle
);
153 startargs
[2] = (unsigned long)task_thread_info(idle
);
156 pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
157 cpu
, startargs
[0], startargs
[1], startargs
[2]);
159 writeq_relaxed(startargs
[3],
160 ipi_mailbox_buf
[cpu_logical_map(cpu
)] + 0x18);
161 writeq_relaxed(startargs
[2],
162 ipi_mailbox_buf
[cpu_logical_map(cpu
)] + 0x10);
163 writeq_relaxed(startargs
[1],
164 ipi_mailbox_buf
[cpu_logical_map(cpu
)] + 0x8);
165 writeq_relaxed(startargs
[0],
166 ipi_mailbox_buf
[cpu_logical_map(cpu
)] + 0x0);
170 static void csr_ipi_probe(void)
172 if (cpu_has_csr() && csr_readl(LOONGSON_CSR_FEATURES
) & LOONGSON_CSRF_IPI
) {
173 ipi_read_clear
= csr_ipi_read_clear
;
174 ipi_write_action
= csr_ipi_write_action
;
175 ipi_write_enable
= csr_ipi_write_enable
;
176 ipi_clear_buf
= csr_ipi_clear_buf
;
177 ipi_write_buf
= csr_ipi_write_buf
;
179 ipi_read_clear
= legacy_ipi_read_clear
;
180 ipi_write_action
= legacy_ipi_write_action
;
181 ipi_write_enable
= legacy_ipi_write_enable
;
182 ipi_clear_buf
= legacy_ipi_clear_buf
;
183 ipi_write_buf
= legacy_ipi_write_buf
;
187 static void ipi_set0_regs_init(void)
189 ipi_set0_regs
[0] = (void __iomem
*)
190 (SMP_CORE_GROUP0_BASE
+ SMP_CORE0_OFFSET
+ SET0
);
191 ipi_set0_regs
[1] = (void __iomem
*)
192 (SMP_CORE_GROUP0_BASE
+ SMP_CORE1_OFFSET
+ SET0
);
193 ipi_set0_regs
[2] = (void __iomem
*)
194 (SMP_CORE_GROUP0_BASE
+ SMP_CORE2_OFFSET
+ SET0
);
195 ipi_set0_regs
[3] = (void __iomem
*)
196 (SMP_CORE_GROUP0_BASE
+ SMP_CORE3_OFFSET
+ SET0
);
197 ipi_set0_regs
[4] = (void __iomem
*)
198 (SMP_CORE_GROUP1_BASE
+ SMP_CORE0_OFFSET
+ SET0
);
199 ipi_set0_regs
[5] = (void __iomem
*)
200 (SMP_CORE_GROUP1_BASE
+ SMP_CORE1_OFFSET
+ SET0
);
201 ipi_set0_regs
[6] = (void __iomem
*)
202 (SMP_CORE_GROUP1_BASE
+ SMP_CORE2_OFFSET
+ SET0
);
203 ipi_set0_regs
[7] = (void __iomem
*)
204 (SMP_CORE_GROUP1_BASE
+ SMP_CORE3_OFFSET
+ SET0
);
205 ipi_set0_regs
[8] = (void __iomem
*)
206 (SMP_CORE_GROUP2_BASE
+ SMP_CORE0_OFFSET
+ SET0
);
207 ipi_set0_regs
[9] = (void __iomem
*)
208 (SMP_CORE_GROUP2_BASE
+ SMP_CORE1_OFFSET
+ SET0
);
209 ipi_set0_regs
[10] = (void __iomem
*)
210 (SMP_CORE_GROUP2_BASE
+ SMP_CORE2_OFFSET
+ SET0
);
211 ipi_set0_regs
[11] = (void __iomem
*)
212 (SMP_CORE_GROUP2_BASE
+ SMP_CORE3_OFFSET
+ SET0
);
213 ipi_set0_regs
[12] = (void __iomem
*)
214 (SMP_CORE_GROUP3_BASE
+ SMP_CORE0_OFFSET
+ SET0
);
215 ipi_set0_regs
[13] = (void __iomem
*)
216 (SMP_CORE_GROUP3_BASE
+ SMP_CORE1_OFFSET
+ SET0
);
217 ipi_set0_regs
[14] = (void __iomem
*)
218 (SMP_CORE_GROUP3_BASE
+ SMP_CORE2_OFFSET
+ SET0
);
219 ipi_set0_regs
[15] = (void __iomem
*)
220 (SMP_CORE_GROUP3_BASE
+ SMP_CORE3_OFFSET
+ SET0
);
223 static void ipi_clear0_regs_init(void)
225 ipi_clear0_regs
[0] = (void __iomem
*)
226 (SMP_CORE_GROUP0_BASE
+ SMP_CORE0_OFFSET
+ CLEAR0
);
227 ipi_clear0_regs
[1] = (void __iomem
*)
228 (SMP_CORE_GROUP0_BASE
+ SMP_CORE1_OFFSET
+ CLEAR0
);
229 ipi_clear0_regs
[2] = (void __iomem
*)
230 (SMP_CORE_GROUP0_BASE
+ SMP_CORE2_OFFSET
+ CLEAR0
);
231 ipi_clear0_regs
[3] = (void __iomem
*)
232 (SMP_CORE_GROUP0_BASE
+ SMP_CORE3_OFFSET
+ CLEAR0
);
233 ipi_clear0_regs
[4] = (void __iomem
*)
234 (SMP_CORE_GROUP1_BASE
+ SMP_CORE0_OFFSET
+ CLEAR0
);
235 ipi_clear0_regs
[5] = (void __iomem
*)
236 (SMP_CORE_GROUP1_BASE
+ SMP_CORE1_OFFSET
+ CLEAR0
);
237 ipi_clear0_regs
[6] = (void __iomem
*)
238 (SMP_CORE_GROUP1_BASE
+ SMP_CORE2_OFFSET
+ CLEAR0
);
239 ipi_clear0_regs
[7] = (void __iomem
*)
240 (SMP_CORE_GROUP1_BASE
+ SMP_CORE3_OFFSET
+ CLEAR0
);
241 ipi_clear0_regs
[8] = (void __iomem
*)
242 (SMP_CORE_GROUP2_BASE
+ SMP_CORE0_OFFSET
+ CLEAR0
);
243 ipi_clear0_regs
[9] = (void __iomem
*)
244 (SMP_CORE_GROUP2_BASE
+ SMP_CORE1_OFFSET
+ CLEAR0
);
245 ipi_clear0_regs
[10] = (void __iomem
*)
246 (SMP_CORE_GROUP2_BASE
+ SMP_CORE2_OFFSET
+ CLEAR0
);
247 ipi_clear0_regs
[11] = (void __iomem
*)
248 (SMP_CORE_GROUP2_BASE
+ SMP_CORE3_OFFSET
+ CLEAR0
);
249 ipi_clear0_regs
[12] = (void __iomem
*)
250 (SMP_CORE_GROUP3_BASE
+ SMP_CORE0_OFFSET
+ CLEAR0
);
251 ipi_clear0_regs
[13] = (void __iomem
*)
252 (SMP_CORE_GROUP3_BASE
+ SMP_CORE1_OFFSET
+ CLEAR0
);
253 ipi_clear0_regs
[14] = (void __iomem
*)
254 (SMP_CORE_GROUP3_BASE
+ SMP_CORE2_OFFSET
+ CLEAR0
);
255 ipi_clear0_regs
[15] = (void __iomem
*)
256 (SMP_CORE_GROUP3_BASE
+ SMP_CORE3_OFFSET
+ CLEAR0
);
259 static void ipi_status0_regs_init(void)
261 ipi_status0_regs
[0] = (void __iomem
*)
262 (SMP_CORE_GROUP0_BASE
+ SMP_CORE0_OFFSET
+ STATUS0
);
263 ipi_status0_regs
[1] = (void __iomem
*)
264 (SMP_CORE_GROUP0_BASE
+ SMP_CORE1_OFFSET
+ STATUS0
);
265 ipi_status0_regs
[2] = (void __iomem
*)
266 (SMP_CORE_GROUP0_BASE
+ SMP_CORE2_OFFSET
+ STATUS0
);
267 ipi_status0_regs
[3] = (void __iomem
*)
268 (SMP_CORE_GROUP0_BASE
+ SMP_CORE3_OFFSET
+ STATUS0
);
269 ipi_status0_regs
[4] = (void __iomem
*)
270 (SMP_CORE_GROUP1_BASE
+ SMP_CORE0_OFFSET
+ STATUS0
);
271 ipi_status0_regs
[5] = (void __iomem
*)
272 (SMP_CORE_GROUP1_BASE
+ SMP_CORE1_OFFSET
+ STATUS0
);
273 ipi_status0_regs
[6] = (void __iomem
*)
274 (SMP_CORE_GROUP1_BASE
+ SMP_CORE2_OFFSET
+ STATUS0
);
275 ipi_status0_regs
[7] = (void __iomem
*)
276 (SMP_CORE_GROUP1_BASE
+ SMP_CORE3_OFFSET
+ STATUS0
);
277 ipi_status0_regs
[8] = (void __iomem
*)
278 (SMP_CORE_GROUP2_BASE
+ SMP_CORE0_OFFSET
+ STATUS0
);
279 ipi_status0_regs
[9] = (void __iomem
*)
280 (SMP_CORE_GROUP2_BASE
+ SMP_CORE1_OFFSET
+ STATUS0
);
281 ipi_status0_regs
[10] = (void __iomem
*)
282 (SMP_CORE_GROUP2_BASE
+ SMP_CORE2_OFFSET
+ STATUS0
);
283 ipi_status0_regs
[11] = (void __iomem
*)
284 (SMP_CORE_GROUP2_BASE
+ SMP_CORE3_OFFSET
+ STATUS0
);
285 ipi_status0_regs
[12] = (void __iomem
*)
286 (SMP_CORE_GROUP3_BASE
+ SMP_CORE0_OFFSET
+ STATUS0
);
287 ipi_status0_regs
[13] = (void __iomem
*)
288 (SMP_CORE_GROUP3_BASE
+ SMP_CORE1_OFFSET
+ STATUS0
);
289 ipi_status0_regs
[14] = (void __iomem
*)
290 (SMP_CORE_GROUP3_BASE
+ SMP_CORE2_OFFSET
+ STATUS0
);
291 ipi_status0_regs
[15] = (void __iomem
*)
292 (SMP_CORE_GROUP3_BASE
+ SMP_CORE3_OFFSET
+ STATUS0
);
295 static void ipi_en0_regs_init(void)
297 ipi_en0_regs
[0] = (void __iomem
*)
298 (SMP_CORE_GROUP0_BASE
+ SMP_CORE0_OFFSET
+ EN0
);
299 ipi_en0_regs
[1] = (void __iomem
*)
300 (SMP_CORE_GROUP0_BASE
+ SMP_CORE1_OFFSET
+ EN0
);
301 ipi_en0_regs
[2] = (void __iomem
*)
302 (SMP_CORE_GROUP0_BASE
+ SMP_CORE2_OFFSET
+ EN0
);
303 ipi_en0_regs
[3] = (void __iomem
*)
304 (SMP_CORE_GROUP0_BASE
+ SMP_CORE3_OFFSET
+ EN0
);
305 ipi_en0_regs
[4] = (void __iomem
*)
306 (SMP_CORE_GROUP1_BASE
+ SMP_CORE0_OFFSET
+ EN0
);
307 ipi_en0_regs
[5] = (void __iomem
*)
308 (SMP_CORE_GROUP1_BASE
+ SMP_CORE1_OFFSET
+ EN0
);
309 ipi_en0_regs
[6] = (void __iomem
*)
310 (SMP_CORE_GROUP1_BASE
+ SMP_CORE2_OFFSET
+ EN0
);
311 ipi_en0_regs
[7] = (void __iomem
*)
312 (SMP_CORE_GROUP1_BASE
+ SMP_CORE3_OFFSET
+ EN0
);
313 ipi_en0_regs
[8] = (void __iomem
*)
314 (SMP_CORE_GROUP2_BASE
+ SMP_CORE0_OFFSET
+ EN0
);
315 ipi_en0_regs
[9] = (void __iomem
*)
316 (SMP_CORE_GROUP2_BASE
+ SMP_CORE1_OFFSET
+ EN0
);
317 ipi_en0_regs
[10] = (void __iomem
*)
318 (SMP_CORE_GROUP2_BASE
+ SMP_CORE2_OFFSET
+ EN0
);
319 ipi_en0_regs
[11] = (void __iomem
*)
320 (SMP_CORE_GROUP2_BASE
+ SMP_CORE3_OFFSET
+ EN0
);
321 ipi_en0_regs
[12] = (void __iomem
*)
322 (SMP_CORE_GROUP3_BASE
+ SMP_CORE0_OFFSET
+ EN0
);
323 ipi_en0_regs
[13] = (void __iomem
*)
324 (SMP_CORE_GROUP3_BASE
+ SMP_CORE1_OFFSET
+ EN0
);
325 ipi_en0_regs
[14] = (void __iomem
*)
326 (SMP_CORE_GROUP3_BASE
+ SMP_CORE2_OFFSET
+ EN0
);
327 ipi_en0_regs
[15] = (void __iomem
*)
328 (SMP_CORE_GROUP3_BASE
+ SMP_CORE3_OFFSET
+ EN0
);
331 static void ipi_mailbox_buf_init(void)
333 ipi_mailbox_buf
[0] = (void __iomem
*)
334 (SMP_CORE_GROUP0_BASE
+ SMP_CORE0_OFFSET
+ BUF
);
335 ipi_mailbox_buf
[1] = (void __iomem
*)
336 (SMP_CORE_GROUP0_BASE
+ SMP_CORE1_OFFSET
+ BUF
);
337 ipi_mailbox_buf
[2] = (void __iomem
*)
338 (SMP_CORE_GROUP0_BASE
+ SMP_CORE2_OFFSET
+ BUF
);
339 ipi_mailbox_buf
[3] = (void __iomem
*)
340 (SMP_CORE_GROUP0_BASE
+ SMP_CORE3_OFFSET
+ BUF
);
341 ipi_mailbox_buf
[4] = (void __iomem
*)
342 (SMP_CORE_GROUP1_BASE
+ SMP_CORE0_OFFSET
+ BUF
);
343 ipi_mailbox_buf
[5] = (void __iomem
*)
344 (SMP_CORE_GROUP1_BASE
+ SMP_CORE1_OFFSET
+ BUF
);
345 ipi_mailbox_buf
[6] = (void __iomem
*)
346 (SMP_CORE_GROUP1_BASE
+ SMP_CORE2_OFFSET
+ BUF
);
347 ipi_mailbox_buf
[7] = (void __iomem
*)
348 (SMP_CORE_GROUP1_BASE
+ SMP_CORE3_OFFSET
+ BUF
);
349 ipi_mailbox_buf
[8] = (void __iomem
*)
350 (SMP_CORE_GROUP2_BASE
+ SMP_CORE0_OFFSET
+ BUF
);
351 ipi_mailbox_buf
[9] = (void __iomem
*)
352 (SMP_CORE_GROUP2_BASE
+ SMP_CORE1_OFFSET
+ BUF
);
353 ipi_mailbox_buf
[10] = (void __iomem
*)
354 (SMP_CORE_GROUP2_BASE
+ SMP_CORE2_OFFSET
+ BUF
);
355 ipi_mailbox_buf
[11] = (void __iomem
*)
356 (SMP_CORE_GROUP2_BASE
+ SMP_CORE3_OFFSET
+ BUF
);
357 ipi_mailbox_buf
[12] = (void __iomem
*)
358 (SMP_CORE_GROUP3_BASE
+ SMP_CORE0_OFFSET
+ BUF
);
359 ipi_mailbox_buf
[13] = (void __iomem
*)
360 (SMP_CORE_GROUP3_BASE
+ SMP_CORE1_OFFSET
+ BUF
);
361 ipi_mailbox_buf
[14] = (void __iomem
*)
362 (SMP_CORE_GROUP3_BASE
+ SMP_CORE2_OFFSET
+ BUF
);
363 ipi_mailbox_buf
[15] = (void __iomem
*)
364 (SMP_CORE_GROUP3_BASE
+ SMP_CORE3_OFFSET
+ BUF
);
368 * Simple enough, just poke the appropriate ipi register
370 static void loongson3_send_ipi_single(int cpu
, unsigned int action
)
372 ipi_write_action(cpu_logical_map(cpu
), (u32
)action
);
376 loongson3_send_ipi_mask(const struct cpumask
*mask
, unsigned int action
)
380 for_each_cpu(i
, mask
)
381 ipi_write_action(cpu_logical_map(i
), (u32
)action
);
384 static irqreturn_t
loongson3_ipi_interrupt(int irq
, void *dev_id
)
386 int cpu
= smp_processor_id();
389 action
= ipi_read_clear(cpu
);
391 if (action
& SMP_RESCHEDULE_YOURSELF
)
394 if (action
& SMP_CALL_FUNCTION
) {
396 generic_smp_call_function_interrupt();
404 * SMP init and finish on secondary CPUs
406 static void loongson3_init_secondary(void)
408 unsigned int cpu
= smp_processor_id();
409 unsigned int imask
= STATUSF_IP7
| STATUSF_IP6
|
410 STATUSF_IP3
| STATUSF_IP2
;
412 /* Set interrupt mask, but don't enable */
413 change_c0_status(ST0_IM
, imask
);
414 ipi_write_enable(cpu
);
416 per_cpu(cpu_state
, cpu
) = CPU_ONLINE
;
417 cpu_set_core(&cpu_data
[cpu
],
418 cpu_logical_map(cpu
) % loongson_sysconf
.cores_per_package
);
419 cpu_data
[cpu
].package
=
420 cpu_logical_map(cpu
) / loongson_sysconf
.cores_per_package
;
423 static void loongson3_smp_finish(void)
425 int cpu
= smp_processor_id();
427 write_c0_compare(read_c0_count() + mips_hpt_frequency
/HZ
);
431 pr_info("CPU#%d finished, CP0_ST=%x\n",
432 smp_processor_id(), read_c0_status());
435 static void __init
loongson3_smp_setup(void)
437 int i
= 0, num
= 0; /* i: physical id, num: logical id */
440 init_cpu_possible(cpu_none_mask
);
442 for (i
= 0; i
< ARRAY_SIZE(smp_group
); i
++) {
445 max_cpus
+= loongson_sysconf
.cores_per_node
;
448 if (max_cpus
< loongson_sysconf
.nr_cpus
) {
449 pr_err("SMP Groups are less than the number of CPUs\n");
450 loongson_sysconf
.nr_cpus
= max_cpus
? max_cpus
: 1;
453 /* For unified kernel, NR_CPUS is the maximum possible value,
454 * loongson_sysconf.nr_cpus is the really present value
457 while (i
< loongson_sysconf
.nr_cpus
) {
458 if (loongson_sysconf
.reserved_cpus_mask
& (1<<i
)) {
459 /* Reserved physical CPU cores */
460 __cpu_number_map
[i
] = -1;
462 __cpu_number_map
[i
] = num
;
463 __cpu_logical_map
[num
] = i
;
464 set_cpu_possible(num
, true);
465 /* Loongson processors are always grouped by 4 */
466 cpu_set_cluster(&cpu_data
[num
], i
/ 4);
471 pr_info("Detected %i available CPU(s)\n", num
);
473 while (num
< loongson_sysconf
.nr_cpus
) {
474 __cpu_logical_map
[num
] = -1;
478 ipi_set0_regs_init();
479 ipi_clear0_regs_init();
480 ipi_status0_regs_init();
482 ipi_mailbox_buf_init();
486 cpu_set_core(&cpu_data
[0],
487 cpu_logical_map(0) % loongson_sysconf
.cores_per_package
);
488 cpu_data
[0].package
= cpu_logical_map(0) / loongson_sysconf
.cores_per_package
;
491 static void __init
loongson3_prepare_cpus(unsigned int max_cpus
)
493 if (request_irq(LS_IPI_IRQ
, loongson3_ipi_interrupt
,
494 IRQF_PERCPU
| IRQF_NO_SUSPEND
, "SMP_IPI", NULL
))
495 pr_err("Failed to request IPI IRQ\n");
496 init_cpu_present(cpu_possible_mask
);
497 per_cpu(cpu_state
, smp_processor_id()) = CPU_ONLINE
;
/*
 * Setup the PC, SP, and GP of a secondary processor and start it running!
 */
static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
{
	pr_info("Booting CPU#%d...\n", cpu);

	ipi_write_buf(cpu, idle);

	return 0;
}
512 #ifdef CONFIG_HOTPLUG_CPU
514 static int loongson3_cpu_disable(void)
517 unsigned int cpu
= smp_processor_id();
519 set_cpu_online(cpu
, false);
520 calculate_cpu_foreign_map();
521 local_irq_save(flags
);
522 clear_c0_status(ST0_IM
);
523 local_irq_restore(flags
);
524 local_flush_tlb_all();
530 static void loongson3_cpu_die(unsigned int cpu
)
532 while (per_cpu(cpu_state
, cpu
) != CPU_DEAD
)
538 /* To shutdown a core in Loongson 3, the target core should go to CKSEG1 and
539 * flush all L1 entries at first. Then, another core (usually Core 0) can
540 * safely disable the clock of the target core. loongson3_play_dead() is
541 * called via CKSEG1 (uncached and unmmaped)
543 static void loongson3_type1_play_dead(int *state_addr
)
546 register long cpuid
, core
, node
, count
;
547 register void *addr
, *base
, *initfunc
;
549 __asm__
__volatile__(
552 " li %[addr], 0x80000000 \n" /* KSEG0 */
553 "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
554 " cache 0, 1(%[addr]) \n"
555 " cache 0, 2(%[addr]) \n"
556 " cache 0, 3(%[addr]) \n"
557 " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
558 " cache 1, 1(%[addr]) \n"
559 " cache 1, 2(%[addr]) \n"
560 " cache 1, 3(%[addr]) \n"
561 " addiu %[sets], %[sets], -1 \n"
562 " bnez %[sets], 1b \n"
563 " addiu %[addr], %[addr], 0x20 \n"
564 " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
565 " sw %[val], (%[state_addr]) \n"
567 " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
569 : [addr
] "=&r" (addr
), [val
] "=&r" (val
)
570 : [state_addr
] "r" (state_addr
),
571 [sets
] "r" (cpu_data
[smp_processor_id()].dcache
.sets
));
573 __asm__
__volatile__(
577 " mfc0 %[cpuid], $15, 1 \n"
578 " andi %[cpuid], 0x3ff \n"
579 " dli %[base], 0x900000003ff01000 \n"
580 " andi %[core], %[cpuid], 0x3 \n"
581 " sll %[core], 8 \n" /* get core id */
582 " or %[base], %[base], %[core] \n"
583 " andi %[node], %[cpuid], 0xc \n"
584 " dsll %[node], 42 \n" /* get node id */
585 " or %[base], %[base], %[node] \n"
586 "1: li %[count], 0x100 \n" /* wait for init loop */
587 "2: bnez %[count], 2b \n" /* limit mailbox access */
588 " addiu %[count], -1 \n"
589 " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
590 " beqz %[initfunc], 1b \n"
592 " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
593 " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
594 " ld $a1, 0x38(%[base]) \n"
595 " jr %[initfunc] \n" /* jump to initial PC */
598 : [core
] "=&r" (core
), [node
] "=&r" (node
),
599 [base
] "=&r" (base
), [cpuid
] "=&r" (cpuid
),
600 [count
] "=&r" (count
), [initfunc
] "=&r" (initfunc
)
605 static void loongson3_type2_play_dead(int *state_addr
)
608 register long cpuid
, core
, node
, count
;
609 register void *addr
, *base
, *initfunc
;
611 __asm__
__volatile__(
614 " li %[addr], 0x80000000 \n" /* KSEG0 */
615 "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
616 " cache 0, 1(%[addr]) \n"
617 " cache 0, 2(%[addr]) \n"
618 " cache 0, 3(%[addr]) \n"
619 " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
620 " cache 1, 1(%[addr]) \n"
621 " cache 1, 2(%[addr]) \n"
622 " cache 1, 3(%[addr]) \n"
623 " addiu %[sets], %[sets], -1 \n"
624 " bnez %[sets], 1b \n"
625 " addiu %[addr], %[addr], 0x20 \n"
626 " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
627 " sw %[val], (%[state_addr]) \n"
629 " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
631 : [addr
] "=&r" (addr
), [val
] "=&r" (val
)
632 : [state_addr
] "r" (state_addr
),
633 [sets
] "r" (cpu_data
[smp_processor_id()].dcache
.sets
));
635 __asm__
__volatile__(
639 " mfc0 %[cpuid], $15, 1 \n"
640 " andi %[cpuid], 0x3ff \n"
641 " dli %[base], 0x900000003ff01000 \n"
642 " andi %[core], %[cpuid], 0x3 \n"
643 " sll %[core], 8 \n" /* get core id */
644 " or %[base], %[base], %[core] \n"
645 " andi %[node], %[cpuid], 0xc \n"
646 " dsll %[node], 42 \n" /* get node id */
647 " or %[base], %[base], %[node] \n"
648 " dsrl %[node], 30 \n" /* 15:14 */
649 " or %[base], %[base], %[node] \n"
650 "1: li %[count], 0x100 \n" /* wait for init loop */
651 "2: bnez %[count], 2b \n" /* limit mailbox access */
652 " addiu %[count], -1 \n"
653 " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
654 " beqz %[initfunc], 1b \n"
656 " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
657 " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
658 " ld $a1, 0x38(%[base]) \n"
659 " jr %[initfunc] \n" /* jump to initial PC */
662 : [core
] "=&r" (core
), [node
] "=&r" (node
),
663 [base
] "=&r" (base
), [cpuid
] "=&r" (cpuid
),
664 [count
] "=&r" (count
), [initfunc
] "=&r" (initfunc
)
669 static void loongson3_type3_play_dead(int *state_addr
)
672 register long cpuid
, core
, node
, count
;
673 register void *addr
, *base
, *initfunc
;
675 __asm__
__volatile__(
678 " li %[addr], 0x80000000 \n" /* KSEG0 */
679 "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
680 " cache 0, 1(%[addr]) \n"
681 " cache 0, 2(%[addr]) \n"
682 " cache 0, 3(%[addr]) \n"
683 " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
684 " cache 1, 1(%[addr]) \n"
685 " cache 1, 2(%[addr]) \n"
686 " cache 1, 3(%[addr]) \n"
687 " addiu %[sets], %[sets], -1 \n"
688 " bnez %[sets], 1b \n"
689 " addiu %[addr], %[addr], 0x40 \n"
690 " li %[addr], 0x80000000 \n" /* KSEG0 */
691 "2: cache 2, 0(%[addr]) \n" /* flush L1 VCache */
692 " cache 2, 1(%[addr]) \n"
693 " cache 2, 2(%[addr]) \n"
694 " cache 2, 3(%[addr]) \n"
695 " cache 2, 4(%[addr]) \n"
696 " cache 2, 5(%[addr]) \n"
697 " cache 2, 6(%[addr]) \n"
698 " cache 2, 7(%[addr]) \n"
699 " cache 2, 8(%[addr]) \n"
700 " cache 2, 9(%[addr]) \n"
701 " cache 2, 10(%[addr]) \n"
702 " cache 2, 11(%[addr]) \n"
703 " cache 2, 12(%[addr]) \n"
704 " cache 2, 13(%[addr]) \n"
705 " cache 2, 14(%[addr]) \n"
706 " cache 2, 15(%[addr]) \n"
707 " addiu %[vsets], %[vsets], -1 \n"
708 " bnez %[vsets], 2b \n"
709 " addiu %[addr], %[addr], 0x40 \n"
710 " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
711 " sw %[val], (%[state_addr]) \n"
713 " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
715 : [addr
] "=&r" (addr
), [val
] "=&r" (val
)
716 : [state_addr
] "r" (state_addr
),
717 [sets
] "r" (cpu_data
[smp_processor_id()].dcache
.sets
),
718 [vsets
] "r" (cpu_data
[smp_processor_id()].vcache
.sets
));
720 __asm__
__volatile__(
724 " mfc0 %[cpuid], $15, 1 \n"
725 " andi %[cpuid], 0x3ff \n"
726 " dli %[base], 0x900000003ff01000 \n"
727 " andi %[core], %[cpuid], 0x3 \n"
728 " sll %[core], 8 \n" /* get core id */
729 " or %[base], %[base], %[core] \n"
730 " andi %[node], %[cpuid], 0xc \n"
731 " dsll %[node], 42 \n" /* get node id */
732 " or %[base], %[base], %[node] \n"
733 "1: li %[count], 0x100 \n" /* wait for init loop */
734 "2: bnez %[count], 2b \n" /* limit mailbox access */
735 " addiu %[count], -1 \n"
736 " lw %[initfunc], 0x20(%[base]) \n" /* check lower 32-bit as jump indicator */
737 " beqz %[initfunc], 1b \n"
739 " ld %[initfunc], 0x20(%[base]) \n" /* get PC (whole 64-bit) via mailbox */
740 " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
741 " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
742 " ld $a1, 0x38(%[base]) \n"
743 " jr %[initfunc] \n" /* jump to initial PC */
746 : [core
] "=&r" (core
), [node
] "=&r" (node
),
747 [base
] "=&r" (base
), [cpuid
] "=&r" (cpuid
),
748 [count
] "=&r" (count
), [initfunc
] "=&r" (initfunc
)
755 int prid_imp
, prid_rev
, *state_addr
;
756 unsigned int cpu
= smp_processor_id();
757 void (*play_dead_at_ckseg1
)(int *);
760 cpuhp_ap_report_dead();
762 prid_imp
= read_c0_prid() & PRID_IMP_MASK
;
763 prid_rev
= read_c0_prid() & PRID_REV_MASK
;
765 if (prid_imp
== PRID_IMP_LOONGSON_64G
) {
766 play_dead_at_ckseg1
=
767 (void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead
);
772 case PRID_REV_LOONGSON3A_R1
:
774 play_dead_at_ckseg1
=
775 (void *)CKSEG1ADDR((unsigned long)loongson3_type1_play_dead
);
777 case PRID_REV_LOONGSON3B_R1
:
778 case PRID_REV_LOONGSON3B_R2
:
779 play_dead_at_ckseg1
=
780 (void *)CKSEG1ADDR((unsigned long)loongson3_type2_play_dead
);
782 case PRID_REV_LOONGSON3A_R2_0
:
783 case PRID_REV_LOONGSON3A_R2_1
:
784 case PRID_REV_LOONGSON3A_R3_0
:
785 case PRID_REV_LOONGSON3A_R3_1
:
786 play_dead_at_ckseg1
=
787 (void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead
);
792 state_addr
= &per_cpu(cpu_state
, cpu
);
794 play_dead_at_ckseg1(state_addr
);
798 static int loongson3_disable_clock(unsigned int cpu
)
800 uint64_t core_id
= cpu_core(&cpu_data
[cpu
]);
801 uint64_t package_id
= cpu_data
[cpu
].package
;
803 if (!loongson_chipcfg
[package_id
] || !loongson_freqctrl
[package_id
])
806 if ((read_c0_prid() & PRID_REV_MASK
) == PRID_REV_LOONGSON3A_R1
) {
807 LOONGSON_CHIPCFG(package_id
) &= ~(1 << (12 + core_id
));
809 if (!(loongson_sysconf
.workarounds
& WORKAROUND_CPUHOTPLUG
))
810 LOONGSON_FREQCTRL(package_id
) &= ~(1 << (core_id
* 4 + 3));
815 static int loongson3_enable_clock(unsigned int cpu
)
817 uint64_t core_id
= cpu_core(&cpu_data
[cpu
]);
818 uint64_t package_id
= cpu_data
[cpu
].package
;
820 if (!loongson_chipcfg
[package_id
] || !loongson_freqctrl
[package_id
])
823 if ((read_c0_prid() & PRID_REV_MASK
) == PRID_REV_LOONGSON3A_R1
) {
824 LOONGSON_CHIPCFG(package_id
) |= 1 << (12 + core_id
);
826 if (!(loongson_sysconf
.workarounds
& WORKAROUND_CPUHOTPLUG
))
827 LOONGSON_FREQCTRL(package_id
) |= 1 << (core_id
* 4 + 3);
832 static int register_loongson3_notifier(void)
834 return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE
,
835 "mips/loongson:prepare",
836 loongson3_enable_clock
,
837 loongson3_disable_clock
);
839 early_initcall(register_loongson3_notifier
);
843 const struct plat_smp_ops loongson3_smp_ops
= {
844 .send_ipi_single
= loongson3_send_ipi_single
,
845 .send_ipi_mask
= loongson3_send_ipi_mask
,
846 .init_secondary
= loongson3_init_secondary
,
847 .smp_finish
= loongson3_smp_finish
,
848 .boot_secondary
= loongson3_boot_secondary
,
849 .smp_setup
= loongson3_smp_setup
,
850 .prepare_cpus
= loongson3_prepare_cpus
,
851 #ifdef CONFIG_HOTPLUG_CPU
852 .cpu_disable
= loongson3_cpu_disable
,
853 .cpu_die
= loongson3_cpu_die
,
855 #ifdef CONFIG_KEXEC_CORE
856 .kexec_nonboot_cpu
= kexec_nonboot_cpu_jump
,