/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}
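
/*
 * Note: slb_allocate_realmode() and slb_allocate_user() are the assembler
 * SLB miss fast paths (slb_low.S); slb_allocate() is the C entry point
 * used when preloading user segments in switch_slb() below.
 */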

#define slb_esid_mask(ssize)	\
        (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)	\
        ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
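
/*
 * mk_esid_data() and mk_vsid_data() build the two register images that
 * slbmte expects: the ESID word (effective segment number, the SLB_ESID_V
 * valid bit and the SLB slot index) and the VSID word (virtual segment ID
 * shifted into position, the SLB_VSID_* protection/page-size flags and the
 * segment-size encoding).  As a rough sketch, bolting the linear mapping
 * into slot 0 (assuming 256M kernel segments) would look like:
 *
 *      vsid = mk_vsid_data(PAGE_OFFSET, MMU_SEGSIZE_256M, SLB_VSID_KERNEL);
 *      esid = mk_esid_data(PAGE_OFFSET, MMU_SEGSIZE_256M, 0);
 *      asm volatile("slbmte %0,%1" : : "r" (vsid), "r" (esid));
 *
 * create_shadowed_slbe() below does this, but updates the SLB shadow
 * buffer first.
 */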

static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}
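
/*
 * Note: on pSeries LPARs the SLB shadow buffer is registered with the
 * hypervisor, which can use it to re-establish the bolted SLB entries if
 * they are lost (for instance when PHYP preempts the partition, as the
 * comment below mentions).  That is why every update of a bolted entry
 * goes through the shadow buffer first.
 */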

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory");
}

static void __slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}
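
/*
 * Bolted-slot convention used above and in slb_initialize() below:
 * slot 0 holds the kernel linear mapping (PAGE_OFFSET), slot 1 the first
 * VMALLOC segment and slot 2 the kernel stack.  slbia does not touch SLB
 * entry 0 (which is why slb_initialize() invalidates "even slot 0"
 * explicitly with slbmte), so only slots 1 and 2 need rewriting here.
 */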

void slb_flush_and_rebolt(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * We can't take a PMU exception in the following code, so hard
         * disable interrupts.
         */
        hard_irq_disable();

        __slb_flush_and_rebolt();
        get_paca()->slb_cache_ptr = 0;
}

void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                         ((addr2 >> SID_SHIFT_1T) != 0));

        /* both addresses are < 1T */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other > 1T.  Not a match */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are > 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
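
/*
 * For example, with 256M segments GET_ESID() is simply the address shifted
 * right by SID_SHIFT (28), so 0x10000000 and 0x10008000 fall in the same
 * segment while 0x10000000 and 0x20000000 do not.  switch_slb() uses this
 * below to avoid preloading the same segment twice.
 */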

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long exec_base;

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause an SLB miss
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
        offset = get_paca()->slb_cache_ptr;
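
        /*
         * The PACA's slb_cache[] records the ESIDs of the user segments the
         * SLB miss handler has inserted since the last switch.  If that list
         * is still valid (it hasn't overflowed SLB_CACHE_ENTRIES), each entry
         * can be invalidated individually with slbie; otherwise fall back to
         * flushing the whole SLB and rebolting the kernel entries.
         */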
        if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;

                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                __slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         * Almost all 32 and 64bit PowerPC executables are linked at
         * 0x10000000 so it makes sense to preload this segment.
         */
        exec_base = 0x10000000;

        if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
            is_kernel_addr(exec_base))
                return;

        slb_allocate(pc);

        if (!esids_match(pc, stack))
                slb_allocate(stack);

        if (!esids_match(pc, exec_base) &&
            !esids_match(stack, exec_base))
                slb_allocate(exec_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        int insn = (*insn_addr & 0xffff0000) | immed;
        patch_instruction(insn_addr, insn);
}
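
/*
 * patch_slb_encoding() keeps the top half of the existing instruction
 * (opcode and register fields) and substitutes the low 16-bit immediate.
 * The insn_addr arguments used below (slb_miss_kernel_load_linear and
 * friends) are labels in the assembler SLB miss handler, so the handler
 * ends up with the boot-time page-size and SLB-size values baked into its
 * immediates instead of loading them from memory on every miss.
 */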

void slb_set_size(u16 size)
{
        extern unsigned int *slb_compare_rr_to_size;

        if (mmu_slb_size == size)
                return;

        mmu_slb_size = size;
        patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        extern unsigned int *slb_miss_kernel_load_vmemmap;
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
                pr_devel("SLB: io LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
                                   SLB_VSID_KERNEL | vmemmap_llp);
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;
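
        /*
         * stab_rr is the round-robin pointer the SLB miss handler uses to
         * pick a victim slot; starting it at SLB_NUM_BOLTED keeps the bolted
         * entries (the slots set up below) from ever being evicted.
         */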

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(2);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, 2);

        asm volatile("isync":::"memory");
}