/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the power_save function for 6xx & 7xxx CPUs,
 * rewritten in assembler.
 *
 * Warning! This code assumes that if your machine has a 750fx
 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 * If this is not the case, some additional changes will have to
 * be made to check a runtime variable (a bit like powersave-nap).
 */
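/*
 * Note: powersave_nap is an int flag owned by C code; it is normally
 * toggled from there (e.g. via the "powersave-nap" sysctl) rather than
 * from this file.
 */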
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/feature-fixups.h>

	.text
/*
 * Init idle, called at early CPU setup time from head.S for each CPU.
 * Make sure no trace of NAP mode remains in HID0, and save default
 * values for some CPU specific registers. Called with r24
 * containing the CPU number and r3 the reloc offset.
 */
_GLOBAL(init_idle_6xx)
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_HID0
	rlwinm	r4,r4,0,10,8	/* Clear NAP */
	mtspr	SPRN_HID0,r4
	b	1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	blr
1:
	slwi	r5,r24,2	/* per-CPU save slot = cpu number * 4 ... */
	add	r5,r5,r3	/* ... plus the reloc offset */
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_MSSCR0
	addis	r6,r5,nap_save_msscr0@ha
	stw	r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_HID1
	addis	r6,r5,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	blr
/*
 * Here is the power_save_6xx function. This could eventually be
 * split into several functions, with the function pointer changed
 * depending on the various features.
 */
_GLOBAL(ppc6xx_idle)
	/* Check if we can nap or doze, put HID0 mask in r3
	 */
	lis	r3,0
BEGIN_FTR_SECTION
	lis	r3,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)

BEGIN_FTR_SECTION
	/* We must dynamically check for the NAP feature as it
	 * can be cleared by CPU init after the fixups are done
	 */
	lis	r4,cur_cpu_spec@ha
	lwz	r4,cur_cpu_spec@l(r4)
	lwz	r4,CPU_SPEC_FEATURES(r4)
	andi.	r0,r4,CPU_FTR_CAN_NAP
	beq	1f
	/* Now check if user or arch enabled NAP mode */
	lis	r4,powersave_nap@ha
	lwz	r4,powersave_nap@l(r4)
	cmpwi	0,r4,0
	beq	1f
	lis	r3,HID0_NAP@h
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	cmpwi	0,r3,0
	beqlr
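	/*
	 * Roughly the C-level logic of the mask selection above, as a
	 * sketch only (this is not actual kernel code):
	 *
	 *	mask = 0;
	 *	if (doze is supported)
	 *		mask = HID0_DOZE;
	 *	if ((cur_cpu_spec->cpu_features & CPU_FTR_CAN_NAP) && powersave_nap)
	 *		mask = HID0_NAP;
	 *	if (!mask)
	 *		return;
	 */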
	/* Some pre-nap cleanups needed on some CPUs */
	andis.	r0,r3,HID0_NAP@h
	beq	2f
BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As explained by the errata
	 * text, we can't be sure they are; we just hope very hard
	 * that it will be enough. At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29	/* clear the L2 prefetch enable bits */
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
2:
BEGIN_FTR_SECTION
	/* Go to low speed mode on some 750FX */
	lis	r4,powersave_lowspeed@ha
	lwz	r4,powersave_lowspeed@l(r4)
	cmpwi	0,r4,0
	beq	1f
	mfspr	r4,SPRN_HID1
	oris	r4,r4,0x0001	/* switch to PLL 1 */
	mtspr	SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
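	/*
	 * Note: selecting PLL 1 here relies on the assumption stated at
	 * the top of this file, i.e. that PLL 1 on 750FX machines has
	 * been programmed to a low speed mode beforehand.
	 */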
	/* Go to NAP or DOZE now */
	mfspr	r4,SPRN_HID0
	lis	r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
	oris	r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
	andc	r4,r4,r5	/* clear all power-saving bits in HID0 ... */
	or	r4,r4,r3	/* ... then set the one selected above */
BEGIN_FTR_SECTION
	oris	r4,r4,HID0_DPM@h	/* that should be done once and for all */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	mtspr	SPRN_HID0,r4
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r8,TI_LOCAL_FLAGS(r2)	/* set napping bit */
	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
	stw	r8,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
	mfmsr	r7
	ori	r7,r7,MSR_EE
	oris	r7,r7,MSR_POW@h
1:	sync
	mtmsr	r7
	isync
	b	1b
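	/*
	 * Setting MSR[POW] (with MSR[EE] enabled) is what actually enters
	 * NAP/DOZE; the next interrupt wakes the core, and because
	 * _TLF_NAPPING is set, the exception entry code sends us to
	 * power_save_ppc32_restore below instead of resuming here.
	 */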
/*
 * Return from NAP/DOZE mode, restore some CPU specific registers.
 * We are called with DR/IR still off and r2 containing the physical
 * address of current. R11 points to the exception frame (physical
 * address). We have to preserve r10.
 */
_GLOBAL(power_save_ppc32_restore)
	lwz	r9,_LINK(r11)		/* interrupted in ppc6xx_idle: */
	stw	r9,_NIP(r11)		/* make it do a blr */

#ifdef CONFIG_SMP
	lwz	r11,TASK_CPU(r2)	/* get cpu number ... */
	slwi	r11,r11,2		/* ... times 4: per-CPU save slot offset */
#else
	li	r11,0
#endif
	/* Todo: make sure all these are in the same page
	 * and load r11 (@ha part + CPU offset) only once
	 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_HID0
	andis.	r9,r9,HID0_NAP@h
	beq	1f
#ifdef CONFIG_VMAP_STACK
	addis	r9, r11, nap_save_msscr0@ha
#else
	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
#endif
	lwz	r9,nap_save_msscr0@l(r9)
	mtspr	SPRN_MSSCR0, r9
	sync
	isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
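	/*
	 * Restoring MSSCR0 undoes the L2 prefetch disable done in
	 * ppc6xx_idle before entering NAP (745x errata workaround).
	 */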
BEGIN_FTR_SECTION
#ifdef CONFIG_VMAP_STACK
	addis	r9, r11, nap_save_hid1@ha
#else
	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
#endif
	lwz	r9,nap_save_hid1@l(r9)
	mtspr	SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
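	/*
	 * Restoring HID1 puts a 750FX back on the PLL setting that
	 * init_idle_6xx saved at boot, switching away from the low
	 * speed PLL if it was selected before napping.
	 */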
	b	transfer_to_handler_cont
_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
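/*
 * Per-CPU save areas for the registers restored above, plus the
 * powersave_lowspeed flag tested in ppc6xx_idle.
 */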
	.data

_GLOBAL(nap_save_msscr0)
	.space	4*NR_CPUS

_GLOBAL(nap_save_hid1)
	.space	4*NR_CPUS

_GLOBAL(powersave_lowspeed)
	.long	0