/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

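/*
 * Illustrative note (not in the original source): SLB miss handling runs
 * in real mode, so the allocator it calls must only touch data that is
 * safe to access with translation off.  Routing every miss, including
 * userspace ones, through slb_allocate_realmode() keeps that invariant
 * simple until dynamic VSIDs require a separate user path again.
 */
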
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

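/*
 * Worked example (illustrative, not in the original source): with 256MB
 * segments SID_SHIFT is 28, so ESID_MASK keeps the top 36 bits of the
 * effective address.  For a hypothetical ea of 0xc000000012345678 bolted
 * into slot 2, mk_esid_data() yields 0xc000000010000000 | SLB_ESID_V | 2:
 * segment base, valid bit and slot index packed into the one doubleword
 * that slbmte expects as its ESID operand.
 */
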
static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        smp_wmb();
        get_slb_shadow()->save_area[entry].vsid = vsid;
        smp_wmb();
        get_slb_shadow()->save_area[entry].esid = esid;
        smp_wmb();
}

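/*
 * Illustrative note (not in the original source): with the barriers above,
 * an observer (e.g. the hypervisor reloading the SLB from the shadow) can
 * see (0, old vsid), (0, new vsid) or (new esid, new vsid), but never the
 * new VSID paired with a stale-but-valid ESID, because the entry stays
 * invalid (esid == 0) for the whole update window.
 */
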
static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
                          entry);

        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
}

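/*
 * Illustrative note (not in the original source): slbmte takes the VSID
 * doubleword (VSID plus protection/LLP flags) in its first register
 * operand and the ESID doubleword (segment base, valid bit, slot index)
 * in its second, which is why the operands appear in that order above.
 */
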
void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
        if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;

        /* Only third entry (stack) may change here so only resave that */
        slb_shadow_update(ksp_esid_data,
                          mk_vsid_data(ksp_esid_data, lflags), 2);

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte  %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte  %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, 1)),
                        "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
}

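/*
 * Illustrative notes (not in the original source): slbia leaves SLB
 * entry 0 intact, so the linear-mapping segment bolted in slot 0 does
 * not need to be re-entered above.  The SLB_ESID_V clearing earlier
 * handles the case where the kernel stack lives in the PAGE_OFFSET
 * segment already covered by slot 0: rebolting it with the valid bit
 * set would create a duplicate ESID, which the architecture forbids.
 */
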
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long esid_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        esid_data = ((unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (esid_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

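/*
 * Illustrative note (not in the original source): the three preloads
 * above cover the segments a task is most likely to touch right after
 * a context switch - the instruction pointer, the stack and the
 * mmap/heap base.  The GET_ESID() comparisons skip a preload when two
 * of those addresses share a segment, since inserting the same ESID
 * twice would create a duplicate SLB entry.
 */
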
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr, 4+
                           (unsigned long)insn_addr);
}

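/*
 * Worked example (illustrative, not in the original source): the patched
 * sites are "li reg,0" instructions in the SLB miss handler, whose low
 * 16 bits hold the immediate.  A hypothetical li r10,0 encodes as
 * 0x39400000; OR-ing in SLB_VSID_KERNEL | linear_llp rewrites it to load
 * the real flag value.  This is why the immediate must start out as 0
 * and the flags must fit in the 16-bit immediate field.
 */
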
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
        extern unsigned int *slb_miss_user_load_huge;
        unsigned long huge_llp;

        huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);

                DBG("SLB: linear  LLP = %04x\n", linear_llp);
                DBG("SLB: io      LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
                patch_slb_encoding(slb_miss_user_load_huge,
                                   SLB_VSID_USER | huge_llp);
                DBG("SLB: huge    LLP = %04x\n", huge_llp);
#endif
        }

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
 {
        unsigned long lflags, vflags;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, vflags, 1);

        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment.  By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
 }
#endif /* CONFIG_PPC_ISERIES */

        get_paca()->stab_rr = SLB_NUM_BOLTED;
}
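
/*
 * Illustrative note (not in the original source): starting stab_rr at
 * SLB_NUM_BOLTED makes the miss handler's round-robin victim pointer
 * begin just past the bolted slots, so casting out an entry to make
 * room for a new one never evicts a bolted segment.
 */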