/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
                                  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;

/* CPU dependent sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;

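/*
 * The stype_* values above are the operands passed to uasm_i_sync() in the
 * generated code. They default to 0, a full heavyweight sync; cps_pm_init()
 * replaces them with lighter CPU-specific intervention/memory/ordering
 * variants where those are known to suffice.
 */
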
/* Labels for the MIPS general purpose registers, used as uasm operands */
enum mips_reg {
        zero, at, v0, v1, a0, a1, a2, a3,
        t0, t1, t2, t3, t4, t5, t6, t7,
        s0, s1, s2, s3, s4, s5, s6, s7,
        t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
        return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
        /*
         * This function is effectively the same as
         * cpuidle_coupled_parallel_barrier, which can't be used here since
         * there's no cpuidle device.
         */

        if (!coupled_coherence)
                return;

        smp_mb__before_atomic();
        atomic_inc(a);

        while (atomic_read(a) < online)
                cpu_relax();

        if (atomic_inc_return(a) == online * 2) {
                atomic_set(a, 0);
                return;
        }

        while (atomic_read(a) > online)
                cpu_relax();
}

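/*
 * cps_pm_enter_state - enter an idle state on the calling CPU
 * @state: the state to enter
 *
 * Runs the generated entry code for @state, coordinating with any coupled
 * sibling VPEs via ready_count & pm_barrier. Returns 0 on success, or
 * -EINVAL if no entry function has been generated for @state or if power
 * gating is requested without CPS SMP in use.
 */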
int cps_pm_enter_state(enum cps_pm_state state)
{
        unsigned cpu = smp_processor_id();
        unsigned core = current_cpu_data.core;
        unsigned online, left;
        cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
        u32 *core_ready_count, *nc_core_ready_count;
        void *nc_addr;
        cps_nc_entry_fn entry;
        struct core_boot_config *core_cfg;
        struct vpe_boot_config *vpe_cfg;

        /* Check that there is an entry function for this state */
        entry = per_cpu(nc_asm_enter, core)[state];
        if (!entry)
                return -EINVAL;

        /* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
        if (cpu_online(cpu)) {
                cpumask_and(coupled_mask, cpu_online_mask,
                            &cpu_sibling_map[cpu]);
                online = cpumask_weight(coupled_mask);
                cpumask_clear_cpu(cpu, coupled_mask);
        } else
#endif
        {
                cpumask_clear(coupled_mask);
                online = 1;
        }

        /* Setup the VPE to run mips_cps_pm_restore when started again */
        if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                /* Power gating relies upon CPS SMP */
                if (!mips_cps_smp_in_use())
                        return -EINVAL;

                core_cfg = &mips_cps_core_bootcfg[core];
                vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
                vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
                vpe_cfg->gp = (unsigned long)current_thread_info();
                vpe_cfg->sp = 0;
        }

        /* Indicate that this CPU might not be coherent */
        cpumask_clear_cpu(cpu, &cpu_coherent_mask);
        smp_mb__after_atomic();

        /* Create a non-coherent mapping of the core ready_count */
        core_ready_count = per_cpu(ready_count, core);
        nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
                                   (unsigned long)core_ready_count);
        nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
        nc_core_ready_count = nc_addr;

        /* Ensure ready_count is zero-initialised before the assembly runs */
        ACCESS_ONCE(*nc_core_ready_count) = 0;
        coupled_barrier(&per_cpu(pm_barrier, core), online);

        /* Run the generated entry code */
        left = entry(online, nc_core_ready_count);

        /* Remove the non-coherent mapping of ready_count */
        kunmap_noncoherent();

        /* Indicate that this CPU is definitely coherent */
        cpumask_set_cpu(cpu, &cpu_coherent_mask);

        /*
         * If this VPE is the first to leave the non-coherent wait state then
         * it needs to wake up any coupled VPEs still running their wait
         * instruction so that they return to cpuidle, which can then complete
         * coordination between the coupled VPEs & provide the governor with
         * a chance to reflect on the length of time the VPEs were in the
         * idle state.
         */
        if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
                arch_send_call_function_ipi_mask(coupled_mask);

        return 0;
}

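/*
 * cps_gen_cache_routine() emits an unrolled loop applying cache op @op to
 * every line of @cache, using t0 as the current address & t1 as the end
 * address. It emits nothing if the cache is not present.
 */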
static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
                                         struct uasm_reloc **pr,
                                         const struct cache_desc *cache,
                                         unsigned op, int lbl)
{
        unsigned cache_size = cache->ways << cache->waybit;
        unsigned i;
        const unsigned unroll_lines = 32;

        /* If the cache isn't present this function has it easy */
        if (cache->flags & MIPS_CACHE_NOT_PRESENT)
                return;

        /* Load base address */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Calculate end address */
        if (cache_size < 0x8000)
                uasm_i_addiu(pp, t1, t0, cache_size);
        else
                UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

        /* Start of cache op loop */
        uasm_build_label(pl, *pp, lbl);

        /* Generate the cache ops */
        for (i = 0; i < unroll_lines; i++) {
                if (cpu_has_mips_r6) {
                        uasm_i_cache(pp, op, 0, t0);
                        uasm_i_addiu(pp, t0, t0, cache->linesz);
                } else {
                        uasm_i_cache(pp, op, i * cache->linesz, t0);
                }
        }

        if (!cpu_has_mips_r6)
                /* Update the base address */
                uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

        /* Loop if we haven't reached the end address yet */
        uasm_il_bne(pp, pr, t0, t1, lbl);
        uasm_i_nop(pp);
}

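/*
 * cps_gen_flush_fsb() emits a workaround which drains the fill/store buffer
 * before a CPC clock-off/power-down sequence, using a performance counter to
 * detect FSB-full pipeline stalls. It returns 0 if the flush is unnecessary
 * or was generated, and -1 if it is required but cannot be generated.
 */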
static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
                                    struct uasm_reloc **pr,
                                    const struct cpuinfo_mips *cpu_info,
                                    int lbl)
{
        unsigned i, fsb_size = 8;
        unsigned num_loads = (fsb_size * 3) / 2;
        unsigned line_stride = 2;
        unsigned line_size = cpu_info->dcache.linesz;
        unsigned perf_counter, perf_event;
        unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

        /*
         * Determine whether this CPU requires an FSB flush, and if so which
         * performance counter/event reflect stalls due to a full FSB.
         */
        switch (__get_cpu_type(cpu_info->cputype)) {
        case CPU_INTERAPTIV:
                perf_counter = 1;
                perf_event = 51;
                break;

        case CPU_PROAPTIV:
                /* Newer proAptiv cores don't require this workaround */
                if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
                        return 0;

                /* On older ones it's unavailable */
                return -1;

        /* CPUs which do not require the workaround */
        case CPU_P5600:
        case CPU_I6400:
                return 0;

        default:
                WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
                return -1;
        }

        /*
         * Ensure that the fill/store buffer (FSB) is not holding the results
         * of a prefetch, since if it is then the CPC sequencer may become
         * stuck in the D3 (ClrBus) state whilst entering a low power state.
         */

        /* Preserve perf counter setup */
        uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Setup perf counter to count FSB full pipeline stalls */
        uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
        uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        /* Base address for loads */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Start of clear loop */
        uasm_build_label(pl, *pp, lbl);

        /* Perform some loads to fill the FSB */
        for (i = 0; i < num_loads; i++)
                uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

        /*
         * Invalidate the new D-cache entries so that the cache will need
         * refilling (via the FSB) if the loop is executed again.
         */
        for (i = 0; i < num_loads; i++) {
                uasm_i_cache(pp, Hit_Invalidate_D,
                             i * line_size * line_stride, t0);
                uasm_i_cache(pp, Hit_Writeback_Inv_SD,
                             i * line_size * line_stride, t0);
        }

        /* Completion barrier */
        uasm_i_sync(pp, stype_memory);
        uasm_i_ehb(pp);

        /* Check whether the pipeline stalled due to the FSB being full */
        uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Loop if it didn't */
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);

        /* Restore perf counter 1. The count may well now be wrong... */
        uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        return 0;
}

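/*
 * cps_gen_set_top_bit() emits an ll/sc loop which atomically sets bit 31 of
 * the word addressed by the r_addr register, retrying until the sc succeeds.
 * The generated sequence is roughly:
 *
 *      lui     t0, 0x8000
 * lbl: ll      t1, 0(r_addr)
 *      or      t1, t1, t0
 *      sc      t1, 0(r_addr)
 *      beqz    t1, lbl
 *       nop
 */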
static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
                                       struct uasm_reloc **pr,
                                       unsigned r_addr, int lbl)
{
        uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
        uasm_build_label(pl, *pp, lbl);
        uasm_i_ll(pp, t1, 0, r_addr);
        uasm_i_or(pp, t1, t1, t0);
        uasm_i_sc(pp, t1, 0, r_addr);
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);
}

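/*
 * cps_gen_entry_code() builds the routine run by cps_pm_enter_state(): it
 * optionally saves CPU state for power gating, synchronises the coupled VPEs
 * via ready_count, has the last VPE flush the L1 caches & disable
 * interventions and coherence, then either issues the appropriate CPC
 * command or executes wait, before re-enabling coherence & releasing the
 * other VPEs on the way back to C code.
 */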
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *buf, *p;
        const unsigned r_online = a0;
        const unsigned r_nc_count = a1;
        const unsigned r_pcohctl = t7;
        const unsigned max_instrs = 256;
        unsigned cpc_cmd;
        int err;
        enum {
                lbl_incready = 1,
                lbl_poll_cont,
                lbl_secondary_hang,
                lbl_disable_coherence,
                lbl_flush_fsb,
                lbl_invicache,
                lbl_flushdcache,
                lbl_hang,
                lbl_set_cont,
                lbl_secondary_cont,
                lbl_decready,
        };

        /* Allocate a buffer to hold the generated code */
        p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
        if (!buf)
                return NULL;

        /* Clear labels & relocs ready for (re)use */
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                /* Power gating relies upon CPS SMP */
                if (!mips_cps_smp_in_use())
                        goto out_err;

                /*
                 * Save CPU state. Note the non-standard calling convention
                 * with the return address placed in v0 to avoid clobbering
                 * the ra register before it is saved.
                 */
                UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
                uasm_i_jalr(&p, v0, t0);
                uasm_i_nop(&p);
        }

        /*
         * Load addresses of required CM & CPC registers. This is done early
         * because they're needed in both the enable & disable coherence steps
         * but in the coupled case the enable step will only run on one VPE.
         */
        UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

        if (coupled_coherence) {
                /* Increment ready_count */
                uasm_i_sync(&p, stype_ordering);
                uasm_build_label(&l, p, lbl_incready);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, 1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_incready);
                uasm_i_addiu(&p, t1, t1, 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);

                /*
                 * If this is the last VPE to become ready for non-coherence
                 * then it should branch below.
                 */
                uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
                uasm_i_nop(&p);

                if (state < CPS_PM_POWER_GATED) {
                        /*
                         * Otherwise this is not the last VPE to become ready
                         * for non-coherence. It needs to wait until coherence
                         * has been disabled before proceeding, which it will do
                         * by polling for the top bit of ready_count being set.
                         */
                        uasm_i_addiu(&p, t1, zero, -1);
                        uasm_build_label(&l, p, lbl_poll_cont);
                        uasm_i_lw(&p, t0, 0, r_nc_count);
                        uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
                        uasm_i_ehb(&p);
                        uasm_i_yield(&p, zero, t1);
                        uasm_il_b(&p, &r, lbl_poll_cont);
                        uasm_i_nop(&p);
                } else {
                        /*
                         * The core will lose power & this VPE will not continue
                         * so it can simply halt here.
                         */
                        uasm_i_addiu(&p, t0, zero, TCHALT_H);
                        uasm_i_mtc0(&p, t0, 2, 4);
                        uasm_build_label(&l, p, lbl_secondary_hang);
                        uasm_il_b(&p, &r, lbl_secondary_hang);
                        uasm_i_nop(&p);
                }
        }

        /*
         * This is the point of no return - this VPE will now proceed to
         * disable coherence. At this point we *must* be sure that no other
         * VPE within the core will interfere with the L1 dcache.
         */
        uasm_build_label(&l, p, lbl_disable_coherence);

        /* Invalidate the L1 icache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
                              Index_Invalidate_I, lbl_invicache);

        /* Writeback & invalidate the L1 dcache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
                              Index_Writeback_Inv_D, lbl_flushdcache);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        /*
         * Disable all but self interventions. The load from COHCTL is defined
         * by the interAptiv & proAptiv SUMs as ensuring that the operation
         * resulting from the preceding store is complete.
         */
        uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Sync to ensure previous interventions are complete */
        uasm_i_sync(&p, stype_intervention);
        uasm_i_ehb(&p);

        /* Disable coherence */
        uasm_i_sw(&p, zero, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        if (state >= CPS_PM_CLOCK_GATED) {
                err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
                                        lbl_flush_fsb);
                if (err)
                        goto out_err;

                /* Determine the CPC command to issue */
                switch (state) {
                case CPS_PM_CLOCK_GATED:
                        cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
                        break;
                case CPS_PM_POWER_GATED:
                        cpc_cmd = CPC_Cx_CMD_PWRDOWN;
                        break;
                default:
                        BUG();
                        goto out_err;
                }

                /* Issue the CPC command */
                UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
                uasm_i_addiu(&p, t1, zero, cpc_cmd);
                uasm_i_sw(&p, t1, 0, t0);

                if (state == CPS_PM_POWER_GATED) {
                        /* If anything goes wrong just hang */
                        uasm_build_label(&l, p, lbl_hang);
                        uasm_il_b(&p, &r, lbl_hang);
                        uasm_i_nop(&p);

                        /*
                         * There's no point generating more code, the core is
                         * powered down & if powered back up will run from the
                         * reset vector not from here.
                         */
                        goto gen_done;
                }

                /* Completion barrier */
                uasm_i_sync(&p, stype_memory);
                uasm_i_ehb(&p);
        }

        if (state == CPS_PM_NC_WAIT) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                if (coupled_coherence)
                        cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
                                            lbl_set_cont);

                /*
                 * VPEs which did not disable coherence will continue
                 * executing, after coherence has been disabled, from this
                 * point.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Now perform our wait */
                uasm_i_wait(&p, 0);
        }

        /*
         * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
         * will run this. The first will actually re-enable coherence & the
         * rest will just be performing a rather unusual nop.
         */
        uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
                /* Decrement ready_count */
                uasm_build_label(&l, p, lbl_decready);
                uasm_i_sync(&p, stype_ordering);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, -1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_decready);
                uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

                /*
                 * This core will be reliant upon another core sending a
                 * power-up command to the CPC in order to resume operation.
                 * Thus an arbitrary VPE can't trigger the core leaving the
                 * idle state and the one that disables coherence might as well
                 * be the one to re-enable it. The rest will continue from here
                 * after that has been done.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        /* The core is coherent, time to return to C code */
        uasm_i_jr(&p, ra);
        uasm_i_nop(&p);

gen_done:
        /* Ensure the code didn't exceed the resources allocated for it */
        BUG_ON((p - buf) > max_instrs);
        BUG_ON((l - labels) > ARRAY_SIZE(labels));
        BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

        /* Patch branch offsets */
        uasm_resolve_relocs(relocs, labels);

        /* Flush the icache */
        local_flush_icache_range((unsigned long)buf, (unsigned long)p);

        return buf;
out_err:
        kfree(buf);
        return NULL;
}

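/*
 * cps_gen_core_entries() generates the entry code for each supported state
 * for the core containing @cpu (once per core) and allocates that core's
 * cache-line aligned ready_count storage.
 */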
static int __init cps_gen_core_entries(unsigned cpu)
{
        enum cps_pm_state state;
        unsigned core = cpu_data[cpu].core;
        unsigned dlinesz = cpu_data[cpu].dcache.linesz;
        void *entry_fn, *core_rc;

        for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
                if (per_cpu(nc_asm_enter, core)[state])
                        continue;
                if (!test_bit(state, state_support))
                        continue;

                entry_fn = cps_gen_entry_code(cpu, state);
                if (!entry_fn) {
                        pr_err("Failed to generate core %u state %u entry\n",
                               core, state);
                        clear_bit(state, state_support);
                }

                per_cpu(nc_asm_enter, core)[state] = entry_fn;
        }

        if (!per_cpu(ready_count, core)) {
                core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
                if (!core_rc) {
                        pr_err("Failed allocate core %u ready_count\n", core);
                        return -ENOMEM;
                }
                per_cpu(ready_count_alloc, core) = core_rc;

                /* Ensure ready_count is aligned to a cacheline boundary */
                core_rc += dlinesz - 1;
                core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
                per_cpu(ready_count, core) = core_rc;
        }

        return 0;
}

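/*
 * cps_pm_init() probes the system (CM, CPC, wait implementation) to decide
 * which idle states can be supported, selects the sync stypes to use, and
 * generates the per-core entry code for every present CPU.
 */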
static int __init cps_pm_init(void)
{
        unsigned cpu;
        int err;

        /* Detect appropriate sync types for the system */
        switch (current_cpu_data.cputype) {
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_M5150:
        case CPU_P5600:
                stype_intervention = 0x2;
                stype_memory = 0x3;
                stype_ordering = 0x10;
                break;

        default:
                pr_warn("Power management is using heavyweight sync 0\n");
                break;
        }

        /* A CM is required for all non-coherent states */
        if (!mips_cm_present()) {
                pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
                goto out;
        }

        /*
         * If interrupts were enabled whilst running a wait instruction on a
         * non-coherent core then the VPE may end up processing interrupts
         * whilst non-coherent. That would be bad.
         */
        if (cpu_wait == r4k_wait_irqoff)
                set_bit(CPS_PM_NC_WAIT, state_support);
        else
                pr_warn("pm-cps: non-coherent wait unavailable\n");

        /* Detect whether a CPC is present */
        if (mips_cpc_present()) {
                /* Detect whether clock gating is implemented */
                if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
                        set_bit(CPS_PM_CLOCK_GATED, state_support);
                else
                        pr_warn("pm-cps: CPC does not support clock gating\n");

                /* Power gating is available with CPS SMP & any CPC */
                if (mips_cps_smp_in_use())
                        set_bit(CPS_PM_POWER_GATED, state_support);
                else
                        pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
        } else {
                pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
        }

        for_each_present_cpu(cpu) {
                err = cps_gen_core_entries(cpu);
                if (err)
                        return err;
        }
out:
        return 0;
}
arch_initcall(cps_pm_init);