/*
** Tablewalk MMU emulator
**
** by Toshiyasu Morita
**
** Started 1/16/98 @ 2:22 am
*/
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/delay.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sun3mmu.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
extern void prom_reboot (char *) __attribute__ ((__noreturn__));

#undef DEBUG_MMU_EMU
/*
** Defines
*/

#define CONTEXTS_NUM		8
#define SEGMAPS_PER_CONTEXT_NUM 2048
#define PAGES_PER_SEGMENT	16
#define PMEGS_NUM		256
#define PMEG_MASK		0xFF
/*
** Globals
*/

unsigned long vmalloc_end = 0;
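/* per-PMEG bookkeeping: the virtual address each PMEG currently maps, its
   allocation state (0 = free, 1 = allocated to a user context, 2 = permanently
   reserved for the kernel/PROM), and the context that owns it */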
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
unsigned char pmeg_ctx[PMEGS_NUM];

/* pointers to the mm structs for each task in each
   context. 0xffffffff is a marker for kernel context */
struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {0xffffffff, 0, 0, 0, 0, 0, 0, 0};
/* has this context been mmdrop'd? */
unsigned char ctx_live[CONTEXTS_NUM] = {1, 0, 0, 0, 0, 0, 0, 0};
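/* contexts still unallocated; once none are left, ctx_next_to_die names the
   round-robin victim (it skips context 0, which the kernel owns) */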
static unsigned char ctx_avail = CONTEXTS_NUM-1;
unsigned char ctx_next_to_die = 1;

/* array of pages to be marked off for the rom when we do mem_init later */
/* 256 pages lets the rom take up to 2mb of physical ram.. I really
   hope it never wants more than that. */
unsigned long rom_pages[256];
/* Print a PTE value in symbolic form. For debugging. */
void print_pte (pte_t pte)
{
#if 0
	/* Verbose version. */
	unsigned long val = pte_val (pte);
	printk (" pte=%lx [addr=%lx",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
	if (val & SUN3_PAGE_VALID)	printk (" valid");
	if (val & SUN3_PAGE_WRITEABLE)	printk (" write");
	if (val & SUN3_PAGE_SYSTEM)	printk (" sys");
	if (val & SUN3_PAGE_NOCACHE)	printk (" nocache");
	if (val & SUN3_PAGE_ACCESSED)	printk (" accessed");
	if (val & SUN3_PAGE_MODIFIED)	printk (" modified");
	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: printk (" memory"); break;
		case SUN3_PAGE_TYPE_IO:     printk (" io");     break;
		case SUN3_PAGE_TYPE_VME16:  printk (" vme16");  break;
		case SUN3_PAGE_TYPE_VME32:  printk (" vme32");  break;
	}
	printk ("]\n");
#else
	/* Terse version. More likely to fit on a line. */
	unsigned long val = pte_val (pte);
	char flags[7], *type;

	flags[0] = (val & SUN3_PAGE_VALID)     ? 'v' : '-';
	flags[1] = (val & SUN3_PAGE_WRITEABLE) ? 'w' : '-';
	flags[2] = (val & SUN3_PAGE_SYSTEM)    ? 's' : '-';
	flags[3] = (val & SUN3_PAGE_NOCACHE)   ? 'x' : '-';
	flags[4] = (val & SUN3_PAGE_ACCESSED)  ? 'a' : '-';
	flags[5] = (val & SUN3_PAGE_MODIFIED)  ? 'm' : '-';
	flags[6] = '\0';

	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: type = "memory"; break;
		case SUN3_PAGE_TYPE_IO:     type = "io";     break;
		case SUN3_PAGE_TYPE_VME16:  type = "vme16";  break;
		case SUN3_PAGE_TYPE_VME32:  type = "vme32";  break;
		default:                    type = "unknown?"; break;
	}

	printk (" pte=%08lx [%07lx %s %s]\n",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
#endif
}
/* Print the PTE value for a given virtual address. For debugging. */
void print_pte_vaddr (unsigned long vaddr)
{
	printk (" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
	print_pte (__pte (sun3_get_pte (vaddr)));
}
/*
 * Initialise the MMU emulator.
 */
void mmu_emu_init(void)
{
	unsigned long seg, num;
	int i,j;
	extern char _stext, _etext;
	unsigned long page;

	memset(rom_pages, 0, sizeof(rom_pages));
	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));

#ifdef DEBUG_MMU_EMU
	printk ("mmu_emu_init: stext=%p etext=%p pmegs=%u\n", &_stext,
		&_etext, (&_etext-&_stext+SUN3_PMEG_SIZE-1) >>
		SUN3_PMEG_SIZE_BITS);
#endif

	/* mark the pmegs copied in sun3-head.S as used */
	for (i=0; i<10; ++i)
		pmeg_alloc[i] = 2;

	/* I'm thinking that most of the top pmegs are going to be
	   used for something, and we probably shouldn't risk it */
	for(num = 0xf0; num <= 0xff; num++)
		pmeg_alloc[num] = 2;

	j = 0;
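	/* walk the segments from 0x0F800000 up to 0x10000000: anything the
	   PROM still has mapped there marks the end of our vmalloc area, and
	   the physical pages it uses are recorded in rom_pages so that
	   mmu_emu_reserve_pages() can keep them out of the free pool later */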
	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
#ifdef DEBUG_MMU_EMU
			printk ("mapped:");
			print_pte_vaddr (seg);
#endif
			// the lowest mapping here is the end of our
			// vmalloc region
			if(!vmalloc_end)
				vmalloc_end = seg;

			// mark the segmap alloc'd, and reserve any
			// of the first 0xbff pages the hardware is
			// already using... does any sun3 support > 24mb?
			pmeg_alloc[sun3_get_segmap(seg)] = 2;

			for(i = 0; i < SUN3_PMEG_SIZE; i += PAGE_SIZE) {
				page = (sun3_get_pte(seg+i) &
					SUN3_PAGE_PGNUM_MASK);

				if((page) && (page < 0xbff)) {
					rom_pages[j] = page;
					j++;
				}
			}
		}
	}

	/* blank everything below the kernel, and we've got the base
	   mapping to start all the contexts off with... */
	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
		sun3_put_segmap(seg, SUN3_INVALID_PMEG);
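	/* replicate the current segment mappings into contexts 1..7 through
	   the PROM's pv_setctxt vector, so the other hardware contexts start
	   out with the same mappings as context 0 */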
	set_fs(MAKE_MM_SEG(3));
	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);
		for(j = 1; j < CONTEXTS_NUM; j++)
			(*(romvec->pv_setctxt))(j, (void *)seg, i);
	}
	set_fs(KERNEL_DS);
}

/* called during mem_init to create the needed holes in the mem
   mappings */
void mmu_emu_reserve_pages(unsigned long max_page)
{
	int i = 0;

	while(rom_pages[i] != 0) {
		// don't tamper with pages that wound up after end_mem
		if(rom_pages[i] < max_page)
			set_bit(PG_reserved, &mem_map[rom_pages[i]].flags);
		i++;
	}
}

/* erase the mappings for a dead context.  Uses the pg_dir for hints
   as the pmeg tables proved somewhat unreliable, and unmapping all of
   TASK_SIZE was much slower and no more stable. */
/* todo: find a better way to keep track of the pmegs used by a
   context for when they're cleared */
void clear_context(unsigned long context)
{
	unsigned char oldctx;
	unsigned long i;

	if(!ctx_alloc[context])
		panic("clear_context: context not allocated\n");

	oldctx = sun3_get_context();

	sun3_put_context(context);

	/* ctx_live denotes if we're clearing a context with an active
	   mm, in which case we can use the pgd for clues and also should
	   mark that mm as lacking a context.  if the context has been
	   mmdrop'd, then flush outright. */

	if(!ctx_live[context]) {
		for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE)
			sun3_put_segmap(i, SUN3_INVALID_PMEG);
	} else {
		pgd_t *pgd;

		pgd = ctx_alloc[context]->pgd;
		ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
		for(i = 0; i < (TASK_SIZE>>PGDIR_SHIFT); i++, pgd++) {
			if(pgd_val(*pgd)) {
				sun3_put_segmap(i<<PGDIR_SHIFT,
						SUN3_INVALID_PMEG);
			}
		}
	}
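	/* drop the bookkeeping for every pmeg this context owned, except the
	   permanently reserved ones (pmeg_alloc == 2) */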
	for(i = 0; i < SUN3_INVALID_PMEG; i++) {
		if((pmeg_ctx[i] == context) && (pmeg_alloc[i] != 2)) {
			pmeg_ctx[i] = 0;
			pmeg_alloc[i] = 0;
			pmeg_vaddr[i] = 0;
		}
	}

	ctx_alloc[context] = (struct mm_struct *)0;
	ctx_avail++;
}

/* gets an empty context.  if full, kills the next context listed to
   die first */
/* This context invalidation scheme is, well, totally arbitrary, I'm
   sure it could be much more intelligent... but it gets the job done
   for now without much overhead in making its decision. */
/* todo: come up with optimized scheme for flushing contexts */
unsigned long get_free_context(struct mm_struct *mm)
{
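	/* context 0 belongs to the kernel, so the search below starts at 1 */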
	unsigned long new = 1;

	if(!ctx_avail) {
		/* kill someone to get our context */
		new = ctx_next_to_die;
		clear_context(new);
		ctx_next_to_die = (ctx_next_to_die + 1) & 0x7;
		if(!ctx_next_to_die)
			ctx_next_to_die++;
	} else {
		while(new < CONTEXTS_NUM) {
			if(ctx_alloc[new])
				new++;
			else
				break;
		}
		// check to make sure one was really free...
		if(new == CONTEXTS_NUM)
			panic("get_free_context: failed to find free context");
	}

	ctx_alloc[new] = mm;
	ctx_live[new] = 1;
	ctx_avail--;

	return new;
}

/*
 * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
 * `context'. Maintain internal PMEG management structures. This doesn't
 * actually map the physical address, but does clear the old mappings.
 */
//todo: better allocation scheme? but is extra complexity worthwhile?
//todo: only clear old entries if necessary? how to tell?

static inline void mmu_emu_map_pmeg (int context, int vaddr)
{
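	/* curr_pmeg is a rotating allocation cursor; being an unsigned char it
	   wraps back to 0 after PMEG 255 (PMEGS_NUM - 1) */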
	static unsigned char curr_pmeg = 128;
	int i;

	/* Round address to PMEG boundary. */
	vaddr &= ~SUN3_PMEG_MASK;

	/* Find a spare one. */
	while (pmeg_alloc[curr_pmeg] == 2)
		++curr_pmeg;

#ifdef DEBUG_MMU_EMU
	printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
	       curr_pmeg, context, vaddr);
#endif

	/* Invalidate old mapping for the pmeg, if any */
	if (pmeg_alloc[curr_pmeg] == 1) {
		sun3_put_context(pmeg_ctx[curr_pmeg]);
		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
		sun3_put_context(context);
	}

	/* Update PMEG management structures. */
	// don't take pmegs away from the kernel...
	if(vaddr >= PAGE_OFFSET) {
		/* map kernel pmegs into all contexts */
		unsigned char i;

		for(i = 0; i < CONTEXTS_NUM; i++) {
			sun3_put_context(i);
			sun3_put_segmap (vaddr, curr_pmeg);
		}
		sun3_put_context(context);
		pmeg_alloc[curr_pmeg] = 2;
		pmeg_ctx[curr_pmeg] = 0;
	}
	else {
		pmeg_alloc[curr_pmeg] = 1;
		pmeg_ctx[curr_pmeg] = context;
		sun3_put_segmap (vaddr, curr_pmeg);
	}
	pmeg_vaddr[curr_pmeg] = vaddr;

	/* Set hardware mapping and clear the old PTE entries. */
	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);

	/* Consider a different one next time. */
	++curr_pmeg;
}

/*
 * Handle a pagefault at virtual address `vaddr'; check if there should be a
 * page there (specifically, whether the software pagetables indicate that
 * there is). This is necessary due to the limited size of the second-level
 * Sun3 hardware pagetables (256 groups of 16 pages). If there should be a
 * mapping present, we select a `spare' PMEG and use it to create a mapping.
 * `read_flag' is nonzero for a read fault; zero for a write. Returns nonzero
 * if we successfully handled the fault.
 */
//todo: should we bump minor pagefault counter? if so, here or in caller?
//todo: possibly inline this into bus_error030 in <asm/buserror.h> ?

// kernel_fault is set when a kernel page couldn't be demand mapped,
// and forces another try using the kernel page table.  basically a
// hack so that vmalloc would work correctly.

int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
{
	unsigned long segment, offset;
	unsigned char context;
	pte_t *pte;
	pgd_t * crp;

	if(current->mm == NULL) {
		crp = swapper_pg_dir;
		context = 0;
	} else {
		context = current->mm->context;
		if(kernel_fault)
			crp = swapper_pg_dir;
		else
			crp = current->mm->pgd;
	}

#ifdef DEBUG_MMU_EMU
	printk ("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
		vaddr, read_flag ? "read" : "write", crp);
#endif

	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
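	/* segment indexes the 2048-entry per-context segmap (and the pgd);
	   offset selects one of the 16 PTEs within that segment */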

#ifdef DEBUG_MMU_EMU
	printk ("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment, offset);
#endif
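	/* the pgd entry holds the physical address of this segment's pte
	   table; index into it, then convert back to a virtual pointer */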
	pte = (pte_t *) pgd_val (*(crp + segment));

//todo: next line should check for valid pmd properly.
	if (!pte) {
//		printk ("mmu_emu_handle_fault: invalid pmd\n");
		return 0;
	}

	pte = (pte_t *) __va ((unsigned long)(pte + offset));

	/* Make sure this is a valid page */
	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
		return 0;

	/* Make sure there's a pmeg allocated for the page */
	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg (context, vaddr);

	/* Write the pte value to hardware MMU */
	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));

	/* Update software copy of the pte value */
// I'm not sure this is necessary. If this is required, we ought to simply
// copy this out when we reuse the PMEG or at some other convenient time.
// Doing it here is fairly meaningless, anyway, as we only know about the
// first access to a given page. --m
	if (!read_flag) {
		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
					   | SUN3_PAGE_MODIFIED);
		else
			return 0;	/* Write-protect error. */
	} else
		pte_val (*pte) |= SUN3_PAGE_ACCESSED;

#ifdef DEBUG_MMU_EMU
	printk ("seg:%d crp:%p ->", get_fs().seg, crp);
	print_pte_vaddr (vaddr);
	printk ("\n");
#endif

	return 1;
}