/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/memblock.h>
#include <linux/percpu.h>

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/abs_addr.h>

struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};
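
/*
 * Each hardware segment table entry is two doublewords: an ESID word
 * carrying the effective segment id plus the protection and valid bits,
 * and a VSID word carrying the virtual segment id it maps to.
 */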

#define NR_STAB_CACHE_ENTRIES 8
static DEFINE_PER_CPU(long, stab_cache_ptr);
static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
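
/*
 * stab_cache remembers which user entries this CPU has inserted since
 * the last context switch; as long as it hasn't overflowed, switch_stab()
 * can invalidate just those slots instead of scanning the whole table.
 */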

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;	/* user segments also get Ks */

	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
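
	/*
	 * The table is one 4K page: 32 groups of 8 entries, 128 bytes per
	 * group. The low five ESID bits pick the primary group; their
	 * ones-complement picks the secondary group searched below.
	 */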

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				eieio();	/* order VSID before ESID */
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}
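
	/*
	 * Eviction candidates are the eight primary and eight secondary
	 * slots; the per-CPU stab_rr cursor remembers where the previous
	 * search stopped so successive castouts rotate through them.
	 */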
	/*
	 * Could not find empty entry, pick one with a round robin selection.
	 * Search all entries in the two groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			/* Entries 0-7 index the primary group. */
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			/* Entries 8-15 index the secondary group. */
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/*
		 * Don't cast out the first kernel segment: it maps the
		 * kernel itself and must stay resident.
		 */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	eieio();				/* Order update */
	castout_ste->esid_data = esid_data;

	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");
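
	/*
	 * The sequence above (clear the ESID word, sync, write the new
	 * VSID, eieio, write the new ESID) keeps the hardware from ever
	 * seeing a valid but half-updated entry; the slbie then evicts
	 * any stale copy of the old segment still cached by the processor.
	 */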

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	} else {
		/* Reject wild addresses and kernel threads with no mm. */
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (!is_kernel_addr(ea)) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			/* Cache overflowed: mark it so the next context
			 * switch flushes the whole table. */
			offset = NR_STAB_CACHE_ENTRIES + 1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync" : : : "memory");
	}

	return 0;
}
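
/*
 * Fault in a segment table entry for the current task.
 */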
int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause a STAB miss,
	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
	 */
	hard_irq_disable();

	offset = __get_cpu_var(stab_cache_ptr);
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		/* Cache is valid: invalidate just the slots we used. */
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;

			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}

	/* Flush the processor's cached segment translations as well. */
	asm volatile("sync; slbia; sync" : : : "memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;
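
	/*
	 * The task will fault on its program counter and stack segments
	 * almost immediately, and the mmap base is the next most likely
	 * touch; fault them in now, skipping duplicate ESIDs, rather than
	 * taking separate STAB misses later.
	 */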
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
	int cpu;

	if (mmu_has_feature(MMU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					      1<<SID_SHIFT);
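
		/*
		 * memblock_alloc_base() returned a physical address below
		 * 1 << SID_SHIFT, i.e. inside the bolted kernel segment;
		 * switch to its kernel virtual mapping before using it.
		 */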
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = virt_to_abs(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
		       "virtual, 0x%llx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
	unsigned long stabreal;

	asm volatile("isync; slbia; isync" : : : "memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync" : : : "memory");
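
	/*
	 * Point the ASR at the real address of the segment table; the
	 * low-order bit OR'd in below is the table-valid bit.
	 */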
	stabreal = get_paca()->stab_real | 0x1ul;

	mtspr(SPRN_ASR, stabreal);
}