/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)	\
	((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
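
/*
 * For illustration, a rough sketch (see the Power ISA for the
 * authoritative field layout) of the two SLB entry words these helpers
 * build for slbmte:
 *
 *   esid word (RB): [ ESID | SLB_ESID_V (valid) | entry index ]
 *   vsid word (RS): [ VSID << slb_vsid_shift | ssize | flags (keys/class/LLP) ]
 *
 * e.g. bolting the kernel linear mapping at PAGE_OFFSET into slot 0,
 * as slb_initialize() below does via create_shadowed_slbe():
 *
 *   unsigned long lflags = SLB_VSID_KERNEL |
 *                          mmu_psize_defs[mmu_linear_psize].sllp;
 *   unsigned long v = mk_vsid_data(PAGE_OFFSET, mmu_kernel_ssize, lflags);
 *   unsigned long e = mk_esid_data(PAGE_OFFSET, mmu_kernel_ssize, 0);
 *   asm volatile("slbmte %0,%1" : : "r" (v), "r" (e) : "memory");
 */
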
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid =
				cpu_to_be64(mk_vsid_data(ea, ssize, flags));
	get_slb_shadow()->save_area[entry].esid =
				cpu_to_be64(mk_esid_data(ea, ssize, entry));
}

static inline void slb_shadow_clear(unsigned long entry)
{
	get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, entry);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, entry))
		     : "memory" );
}

static void __slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		/* The stack is in the first segment of the linear mapping
		 * (or kstack isn't initialized yet); leave slot 2 invalid. */
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(2);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
			"r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
			"r"(ksp_vsid_data),
			"r"(ksp_esid_data)
		     : "memory");
}

void slb_flush_and_rebolt(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}

void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is >= 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are >= 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other >= 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are >= 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
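
/*
 * Worked example, assuming 256M segments where SID_SHIFT is 28 and
 * GET_ESID(ea) is essentially ea >> 28:
 *
 *   esids_match(0x10000000, 0x1fffffff) -> match    (both in segment 0x1)
 *   esids_match(0x10000000, 0x20000000) -> no match (segments 0x1 vs 0x2)
 *
 * On a 1T-capable system, addresses on opposite sides of the 1T
 * boundary fall into case 3 and never match.
 */
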
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32-bit and 64-bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}
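
/*
 * For reference, a sketch (not an authoritative ISA description) of the
 * slbie operand assembled in the loop above.  Each slb_cache[] entry
 * holds a user ESID, so shifting it back up by SID_SHIFT rebuilds an
 * effective address within the segment to invalidate:
 *
 *   slbie_data  = esid << SID_SHIFT;            // EA
 *   slbie_data |= ssize << SLBIE_SSIZE_SHIFT;   // segment size (256M/1T)
 *   slbie_data |= SLBIE_C;                      // class bit
 *
 * The class bit passed to slbie is expected to match the class the
 * entry was installed with; user entries are created with class 1,
 * which is why SLBIE_C is set unconditionally in the loop.
 */
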
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}
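
/*
 * A sketch of the patching, assuming (as the callers below do) that the
 * target is a D-form instruction such as "li rX,0" whose low 16 bits
 * hold the immediate:
 *
 *   before:                                li r11,0     = 0x39600000
 *   after patch_slb_encoding(addr, 0x490): li r11,0x490 = 0x39600490
 *
 * The opcode and register fields in the upper 16 bits are preserved;
 * only the immediate is rewritten, and patch_instruction() makes the
 * new instruction visible to instruction fetch.
 */
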
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_miss_kernel_load_vmemmap[];

void slb_set_size(u16 size)
{
	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(2);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, 2);

	asm volatile("isync":::"memory");
}