/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

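/*
 * Helpers for building the two halves of an SLB entry: the ESID
 * (effective segment ID) word and the VSID (virtual segment ID) word,
 * for either 256MB or 1TB segments.
 */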
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

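/*
 * The VSID word carries the kernel VSID shifted into position for the
 * segment size, the protection/page-size flags, and the segment size
 * encoding itself.
 */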
#define slb_vsid_shift(ssize)	\
	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

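/*
 * Keep the current CPU's SLB shadow buffer (consulted by PHYP when it
 * restores our SLB) in sync with a bolted entry.
 */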
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
	get_slb_shadow()->save_area[entry].esid = 0;
}

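/*
 * Create a bolted SLB entry: update the shadow buffer first, then
 * write the real entry with slbmte.
 */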
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, entry);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, entry))
		     : "memory" );
}

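/*
 * Flush the SLB with slbia (which leaves the bolted linear-mapping
 * entry in slot 0 in place) and re-bolt the vmalloc (slot 1) and
 * kernel stack (slot 2) entries.
 */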
void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(2);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
	}

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
		        "r"(ksp_vsid_data),
		        "r"(ksp_esid_data)
		     : "memory");
}

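/*
 * Rebuild the bolted vmalloc entry (slot 1) with the current vmalloc
 * page-size flags, then flush and re-bolt so the change takes effect.
 */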
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (esids_match(pc,stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if (esids_match(pc,unmapped_base) || esids_match(stack,unmapped_base))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

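/*
 * The SLB miss handler is assembled with zero immediates in the
 * instructions that encode the kernel segment flags and the SLB-size
 * comparison; slb_initialize() patches the real values in at boot
 * using this helper.
 */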
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;
	extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	extern unsigned int *slb_miss_kernel_load_vmemmap;
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(2);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, 2);

	asm volatile("isync":::"memory");
}