arch/mips/kernel/pm-cps.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>
/**
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];
enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};
bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}
static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

	smp_mb__before_atomic();
	atomic_inc(a);
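
	/* Wait for all online coupled VPEs to arrive at the barrier */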
	while (atomic_read(a) < online)
		cpu_relax();
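
	/*
	 * Increment again on the way out; the last VPE to leave sees
	 * online * 2 and resets the counter ready for the next use.
	 */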
	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
		cpu_relax();
}
int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = cpu_core(&current_cpu_data);
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;
	/* Calculate which coupled CPUs (VPEs) are online */
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}
	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}
	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	WRITE_ONCE(*nc_core_ready_count, 0);
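
	/* Rendezvous with the other online coupled VPEs before proceeding */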
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
				  struct uasm_reloc **pr,
				  const struct cache_desc *cache,
				  unsigned op, int lbl)
{
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
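	/* addiu's signed 16-bit immediate covers cache sizes below 0x8000 */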
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
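	/*
	 * MIPSr6 shrank the CACHE instruction's offset field, so on r6 the
	 * base address is stepped after every line rather than encoding the
	 * line offset into each op.
	 */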
	for (i = 0; i < unroll_lines; i++) {
		if (cpu_has_mips_r6) {
			uasm_i_cache(pp, op, 0, t0);
			uasm_i_addiu(pp, t0, t0, cache->linesz);
		} else {
			uasm_i_cache(pp, op, i * cache->linesz, t0);
		}
	}

	if (!cpu_has_mips_r6)
		/* Update the base address */
		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}
static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
			     struct uasm_reloc **pr,
			     const struct cpuinfo_mips *cpu_info,
			     int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	default:
		/* Assume that the CPU does not need this workaround */
		return 0;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
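	/*
	 * The event number goes in the PerfCtl event field; the low 0xf sets
	 * the EXL/K/S/U bits so stalls are counted in every execution mode.
	 */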
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(pp, __SYNC_full);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}
static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				struct uasm_reloc **pr,
				unsigned r_addr, int lbl)
{
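	/* Atomically set bit 31 of *r_addr using an LL/SC retry loop */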
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}
static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}
	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, __SYNC_mb);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);
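
		/*
		 * The addiu above sits in the beqz delay slot, so once the
		 * store conditional succeeds t1 holds the incremented
		 * ready_count value.
		 */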
		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			if (cpu_has_mipsmt)
				uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			if (cpu_has_mipsmt) {
				/* Halt the VPE via C0 tchalt register */
				uasm_i_addiu(&p, t0, zero, TCHALT_H);
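				/* TCHalt is CP0 register 2, select 4 */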
				uasm_i_mtc0(&p, t0, 2, 4);
			} else if (cpu_has_vp) {
				/* Halt the VP via the CPC VP_STOP register */
				unsigned int vpe_id;

				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
				uasm_i_sw(&p, t0, 0, t1);
			} else {
				BUG();
			}
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}
	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(&p, __SYNC_full);
	uasm_i_ehb(&p);
	if (mips_cm_revision() < CM_REV_CM3) {
		/*
		 * Disable all but self interventions. The load from COHCTL is
		 * defined by the interAptiv & proAptiv SUMs as ensuring that the
		 * operation resulting from the preceding store is complete.
		 */
		uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
		uasm_i_sw(&p, t0, 0, r_pcohctl);
		uasm_i_lw(&p, t0, 0, r_pcohctl);

		/* Barrier to ensure write to coherence control is complete */
		uasm_i_sync(&p, __SYNC_full);
		uasm_i_ehb(&p);
	}

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Barrier to ensure write to CPC command is complete */
		uasm_i_sync(&p, __SYNC_full);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}
	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
				? CM_GCR_Cx_COHERENCE_COHDOMAINEN
				: CM3_GCR_Cx_COHERENCE_COHEN);

	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Barrier to ensure write to coherence control is complete */
	uasm_i_sync(&p, __SYNC_full);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, __SYNC_mb);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
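
		/*
		 * The andi above executes in the beqz delay slot, masking the
		 * pre-decrement count into v0 - ie. the number of VPEs that
		 * were in the wait state when this one left it, which becomes
		 * the generated function's return value.
		 */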
		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);
	}
	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}
static int cps_pm_online_cpu(unsigned int cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_core(&cpu_data[cpu]);
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}
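
	/* Allocate the core's shared ready_count when its first CPU comes online */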
	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed to allocate core %u ready_count\n", core);
			return -ENOMEM;
		}
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}
static int cps_pm_power_notifier(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	unsigned int stat;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		stat = read_cpc_cl_stat_conf();
		/*
		 * If we're attempting to suspend the system and power down all
		 * of the cores, the JTAG detect bit indicates that the CPC will
		 * instead put the cores into clock-off state. In this state
		 * a connected debugger can cause the CPU to attempt
		 * interactions with the powered down system. At best this will
		 * fail. At worst, it can hang the NoC, requiring a hard reset.
		 * To avoid this, just block system suspend if a JTAG probe
		 * is detected.
		 */
		if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) {
			pr_warn("JTAG probe is connected - abort suspend\n");
			return NOTIFY_BAD;
		}
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}
static int __init cps_pm_init(void)
{
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		return 0;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	pm_notifier(cps_pm_power_notifier, 0);

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
				 cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);