/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>

#include "powernv.h"
#include "subcore.h"

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

static u32 supported_cpuidle_states;

static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of the current cpu and use
	 * the same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
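
	/*
	 * opal_slw_set_reg() registers each value with OPAL so that the
	 * SLW ("sleep-winkle") engine can restore the SPR when a thread
	 * wakes up from a state that loses full hypervisor context.
	 */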
	for_each_possible_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)&paca[cpu];

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/*
			 * HSPRG0 is used to store the cpu's pointer to paca.
			 * Hence last 3 bits are guaranteed to be 0. Program
			 * slw to restore HSPRG0 with 63rd bit set, so that
			 * when a thread wakes up at 0x100 we can use this bit
			 * to distinguish between fastsleep and deep winkle.
			 * This is not necessary with stop/psscr since the PLS
			 * field of psscr indicates which state we are waking
			 * up from.
			 */
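			/* In IBM MSB-0 numbering, bit 63 is the least significant bit. */
			hsprg0_val |= 1;
		}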
		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* HIDs are per-core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;

	/*
	 * core_idle_state - The first 8 bits track the idle state of each
	 * thread of the core. Bit 8 is the lock bit. Initially all thread
	 * bits are set; they are cleared when the thread enters a deep idle
	 * state like sleep or winkle. Initially the lock bit is cleared.
	 * The lock bit has two purposes:
	 * a. While the first thread is restoring core state, it prevents
	 *    other threads in the core from switching to process context.
	 * b. While the last thread in the core is saving the core state, it
	 *    prevents a different thread from waking up.
	 */
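	/*
	 * Illustration (assuming 8 threads per core): the value is 0xff
	 * when every thread is running, 0x00 when the whole core is in a
	 * deep idle state, and bit 8 (0x100) is set for the duration of a
	 * core-state save or restore.
	 */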
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = PNV_CORE_IDLE_THREAD_BITS;

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca[cpu].core_idle_state_ptr = core_idle_state;
			paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
			paca[cpu].thread_mask = 1 << j;
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		pnv_save_sprs_for_deep_states();
}

u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);

static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;
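
/*
 * The sysfs knob is registered via device_create_file() in
 * pnv_init_idle_states(); with the standard cpu sysfs root, usage is,
 * for example:
 *	echo 1 > /sys/devices/system/cpu/fastsleep_workaround_applyonce
 */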

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies that the fastsleep
	 * workaround needs to be left in 'applied' state on all the cores.
	 * Do this by:
	 * 1. Patching out the call to 'undo' workaround in fastsleep exit path
	 * 2. Sending an IPI to all the cores which have at least one online
	 *    thread
	 * 3. Patching out the call to 'apply' workaround in fastsleep entry
	 *    path
	 * There is no need to send an IPI to cores which have all threads
	 * offlined, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
	 */
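	/* Step 1: patch out the 'undo' call in the fastsleep exit path. */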
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_exit,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit\n");
		goto fail;
	}
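
	/* Step 2: apply the workaround once on each core with an online thread. */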
	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
			 pnv_fastsleep_workaround_apply,
			 &err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply\n");
		goto fail;
	}
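
	/* Step 3: patch out the 'apply' call in the fastsleep entry path. */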
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_entry,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry\n");
		goto fail;
	}

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
static void power9_idle(void)
{
	/* Requesting stop state 0 */
	power9_idle_stop(0);
}

/*
 * First deep stop state. Used to figure out when to save/restore
 * hypervisor context.
 */
u64 pnv_first_deep_stop_state = MAX_STOP_STATE;

/*
 * Deepest stop idle state. Used when a cpu is offlined.
 */
u64 pnv_deepest_stop_state;

/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates
 *	the lowest power-saving state the thread entered since the stop
 *	instruction was last executed.
 *
 *	Bit 41 - Status Disable (SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss (ESL)
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion (EC)
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit (PSLL)
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level (RL)
 *	Used to specify which power-saving level must be entered on
 *	executing the stop instruction.
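 *
 *	For example (illustrative value, not taken from firmware): a PSSCR
 *	requesting stop level 3 with state loss allowed (ESL = 1) and
 *	wakeup gated by LPCR's PECE bits (EC = 1) sets ISA bits 42 and 43,
 *	i.e. LSB-0 bits 21 and 20, plus RL = 3:
 *	0x200000 | 0x100000 | 0x3 == 0x300003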
 *
 * @np: /ibm,opal/power-mgt device node
 * @flags: cpu-idle-state-flags array
 * @dt_idle_states: Number of idle state entries
 * Returns 0 on success
 */
static int __init pnv_arch300_idle_init(struct device_node *np, u32 *flags,
					int dt_idle_states)
{
	u64 *psscr_val = NULL;
	int rc = 0, i;

	psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val),
			    GFP_KERNEL);
	if (!psscr_val) {
		rc = -1;
		goto out;
	}
	if (of_property_read_u64_array(np,
			"ibm,cpu-idle-state-psscr",
			psscr_val, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
		rc = -1;
		goto out;
	}

	/*
	 * Set pnv_first_deep_stop_state and pnv_deepest_stop_state.
	 * pnv_first_deep_stop_state should be set to the first stop
	 * level to cause hypervisor state loss.
	 * pnv_deepest_stop_state should be set to the deepest stop
	 * state.
	 */
	pnv_first_deep_stop_state = MAX_STOP_STATE;
	for (i = 0; i < dt_idle_states; i++) {
		u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;

		if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
		    (pnv_first_deep_stop_state > psscr_rl))
			pnv_first_deep_stop_state = psscr_rl;

		if (pnv_deepest_stop_state < psscr_rl)
			pnv_deepest_stop_state = psscr_rl;
	}

out:
	kfree(psscr_val);
	return rc;
}

/*
 * Probe device tree for supported idle states
 */
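/*
 * For reference, an illustrative excerpt of that node (the property
 * names are the ones read below; the values are made up):
 *
 *	power-mgt {
 *		ibm,cpu-idle-state-flags = <0x11 0x81>;
 *		ibm,cpu-idle-state-psscr = <0x0 0x0 0x0 0x3>;
 *	};
 */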
static void __init pnv_probe_idle_states(void)
{
	struct device_node *np;
	int dt_idle_states;
	u32 *flags = NULL;
	int i;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}
	dt_idle_states = of_property_count_u32_elems(np,
			"ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);

	if (of_property_read_u32_array(np,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (pnv_arch300_idle_init(np, flags, dt_idle_states))
			goto out;
	}

	for (i = 0; i < dt_idle_states; i++)
		supported_cpuidle_states |= flags[i];

out:
	kfree(flags);
}

static int __init pnv_init_idle_states(void)
{
	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;

	pnv_probe_idle_states();

	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_entry,
			PPC_INST_NOP);
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_exit,
			PPC_INST_NOP);
	} else {
		/*
		 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that the
		 * workaround is needed to use fastsleep. Provide sysfs
		 * control to choose how this workaround has to be applied.
		 */
		device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
	}

	pnv_alloc_idle_core_states();

	if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
		ppc_md.power_save = power7_idle;
	else if (supported_cpuidle_states & OPAL_PM_STOP_INST_FAST)
		ppc_md.power_save = power9_idle;

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);