/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
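
/*
 * The SLB is a cache of recently-used segment translations (ESID ->
 * VSID mappings).  Misses are serviced by the low-level assembly
 * handlers; this file provides the slow paths: setting up the bolted
 * kernel entries at boot, flushing and rebolting them, and flushing
 * user entries on context switch.
 */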

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}
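
/* Build the ESID (effective segment id) dword of an SLB entry: the
 * segment number of the effective address, the valid bit, and the
 * SLB slot index the entry will occupy. */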
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}
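
/* Build the VSID (virtual segment id) dword: the kernel VSID for this
 * address shifted into place, OR'd with protection/page-size flags. */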
static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}
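
/* Install one SLB entry with slbmte: operand %0 carries the VSID
 * dword (RS), %1 the ESID dword including the slot number (RB). */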
static inline void create_slbe(unsigned long ea, unsigned long flags,
                               unsigned long entry)
{
        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
}
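
/* Flush the SLB with slbia (which leaves slot 0, the bolted linear
 * mapping, intact) and immediately re-create the other bolted
 * entries: the first VMALLOC segment and the kernel stack. */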
static void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, virtual_llp, lflags, vflags;
        unsigned long ksp_esid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | virtual_llp;
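
        /* Prepare the stack entry for slot 2; if the kernel stack is
         * in the segment already bolted at PAGE_OFFSET (slot 0), clear
         * the valid bit so the slbmte below installs nothing. */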
        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
        if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, 1)),
                        "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
}

/* Flush all user entries from the SLB of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long esid_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;
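
        /* If the SLB miss handler kept a complete record of the user
         * segments it installed, invalidate just those; otherwise the
         * cache overflowed and we must flush and rebolt everything. */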
        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        esid_data = ((unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (esid_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;
#ifdef CONFIG_PPC_64K_PAGES
        get_paca()->pgdir = mm->pgd;
#endif /* CONFIG_PPC_64K_PAGES */

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}
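
/* The SLB miss handlers load their VSID flags with an immediate that
 * is patched once at boot, when the page-size encodings are known.
 * The target instruction is assumed to have a zero immediate field,
 * so the new value can simply be OR'd in. */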
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr, 4+
                           (unsigned long)insn_addr);
}
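
/* Boot-time SLB setup: patch the miss handlers with the runtime page
 * size encodings, then (except on iSeries, where the hypervisor has
 * already done it) install the bolted kernel entries. */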
void slb_initialize(void)
{
        unsigned long linear_llp, virtual_llp;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_virtual;
        extern unsigned int *slb_miss_user_load_normal;
#ifdef CONFIG_HUGETLB_PAGE
        extern unsigned int *slb_miss_user_load_huge;
        unsigned long huge_llp;

        huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_virtual,
                                   SLB_VSID_KERNEL | virtual_llp);
                patch_slb_encoding(slb_miss_user_load_normal,
                                   SLB_VSID_USER | virtual_llp);

                DBG("SLB: linear  LLP = %04x\n", linear_llp);
                DBG("SLB: virtual LLP = %04x\n", virtual_llp);
#ifdef CONFIG_HUGETLB_PAGE
                patch_slb_encoding(slb_miss_user_load_huge,
                                   SLB_VSID_USER | huge_llp);
                DBG("SLB: huge    LLP = %04x\n", huge_llp);
#endif
        }

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
 {
        unsigned long lflags, vflags;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | virtual_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_slbe(PAGE_OFFSET, lflags, 0);

        /* VMALLOC space has 4K pages always for now */
        create_slbe(VMALLOC_START, vflags, 1);

        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment.  By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
 }
#endif /* CONFIG_PPC_ISERIES */

        get_paca()->stab_rr = SLB_NUM_BOLTED;
}