[linux-2.6/verdex.git] arch/ppc64/mm/stab.c
/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>

struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

/* Both the segment table and SLB code use the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
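
/*
 * stab_cache[] remembers which segment table slots this CPU allocated for
 * user addresses, so switch_stab() can invalidate just those slots instead
 * of scanning the whole table.
 */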

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (! kernel_segment)
		esid_data |= STE_ESID_KS;
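
	/*
	 * Each STE group holds eight 16-byte entries (128 bytes), so the low
	 * five ESID bits select a group: shifting them by 7 gives the byte
	 * offset of the group within the table, shifting by 3 gives the
	 * global entry number of the group's first slot.
	 */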
	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				asm volatile("eieio":::"memory");
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}

		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find empty entry, pick one with a round robin selection.
	 * Search all entries in the two groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment */
		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */
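	/*
	 * Order the stores so the hardware never sees a half-written STE:
	 * invalidate the old ESID first, then install the new VSID, and
	 * only then write the new ESID with the valid bit set.
	 */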

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	asm volatile("eieio" : : : "memory");	/* Order update */
	castout_ste->esid_data = esid_data;

	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (ea >= KERNELBASE) {
		vsid = get_kernel_vsid(ea);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (! mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (ea < KERNELBASE) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
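		/*
		 * A value above NR_STAB_CACHE_ENTRIES tells switch_stab()
		 * that the cache overflowed and the whole table must be
		 * scanned at the next context switch.
		 */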
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (ea < KERNELBASE) {
				ste->esid_data = 0;
			}
		}
	}
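
	/*
	 * Flush any segment translations the processor may still hold for
	 * the entries just invalidated.
	 */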
	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;
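
	/*
	 * Preload the segments for the instruction pointer, the stack and
	 * the mmap base, bailing out early once they fall in a segment
	 * that has already been entered.
	 */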
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

extern void slb_initialize(void);

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB. All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(KERNELBASE);
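
	/*
	 * Processors with an SLB manage segments through the SLB rather
	 * than a segment table; hand off to the SLB code for those.
	 */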
	if (cpu_has_feature(CPU_FTR_SLB)) {
		slb_initialize();
	} else {
		asm volatile("isync; slbia; isync":::"memory");
		make_ste(stab, GET_ESID(KERNELBASE), vsid);

		/* Order update */
		asm volatile("sync":::"memory");
	}
}