/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */
#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>

#include <arch_kernel.h>

#include "arch_040_mmu.h"
//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
extern page_root_entry *gPageRoot;
//XXX: the Milan BIOS uses the MMU for itself,
// likely to virtualize missing Atari I/O ports...
// tcr:  c000     (enabled, 8K pages :()
// dtt0: 803fe140 0x80000000 & ~0x3f... en ignFC2 U=1 CI,S  RW
// dtt1: 403fe060 0x40000000 & ~0x3f... en ignFC2 U=0 CI,NS RW
// itt0: 803fe040 0x80000000 & ~0x3f... en ignFC2 U=0 CI,S  RW
// itt1: 403fe000 0x40000000 & ~0x3f... en ignFC2 U=0 C,WT  RW
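// (For reference, the 040 TTR bit layout per the MC68040 User's Manual:
// bits 31-24 address base, 23-16 address mask, 15 E (enable), 14-13 S-field
// (user/supervisor/ignore FC2), 9-8 U1/U0 (user attributes), 6-5 cache mode,
// 2 W (write protect).)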

static void
dump(void)
{
	uint32 tcr, dttr0, dttr1, ittr0, ittr1, srp, urp;

	TRACE(("mmu_040:dump:\n"));

	asm volatile("movec %%tcr,%0\n" : "=d"(tcr) :);
	TRACE(("tcr:\t%08lx\n", tcr));

	asm volatile("movec %%dtt0,%0\n" : "=d"(dttr0) :);
	TRACE(("dtt0:\t%08lx\n", dttr0));
	asm volatile("movec %%dtt1,%0\n" : "=d"(dttr1) :);
	TRACE(("dtt1:\t%08lx\n", dttr1));

	asm volatile("movec %%itt0,%0\n" : "=d"(ittr0) :);
	TRACE(("itt0:\t%08lx\n", ittr0));
	asm volatile("movec %%itt1,%0\n" : "=d"(ittr1) :);
	TRACE(("itt1:\t%08lx\n", ittr1));
	asm volatile("movec %%srp,%0\n" : "=d"(srp) :);
	TRACE(("srp:\t%08lx\n", srp));
	asm volatile("movec %%urp,%0\n" : "=d"(urp) :);
	TRACE(("urp:\t%08lx\n", urp));

	TRACE(("mmu_040:dump: done\n"));
}

static void
initialize(void)
{
	TRACE(("mmu_040:initialize\n"));
}

static void
set_tt(int which, addr_t pa, size_t len, uint32 perms /* NOTUSED */)
{
	TRACE(("mmu_040:set_tt(%d, 0x%lx, 0x%lx, 0x%08lx)\n", which, pa, len,
		perms));

	uint32 mask, ttr;

	len = (len >> 24) & 0x00ff;
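	// the TTR address mask marks "don't care" bits: each bit set in
	// bits 23-16 excludes the corresponding address-base bit from the
	// match, so a larger mask maps a larger power-of-two range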

	// enable, super only, upa=0,
	// cacheable write-through, rw
	ttr = 0x0000a000;
	ttr |= (pa & 0xff000000);
	ttr |= (mask & 0x00ff0000);

	TRACE(("mmu_040:set_tt: 0x%08lx\n", ttr));
109 "movec %0,%%dtt0\n" \
110 "movec %0,%%itt0\n" \
115 "movec %0,%%dtt1\n" \
116 "movec %0,%%itt1\n" \

static status_t
load_rp(addr_t pa)
{
	TRACE(("mmu_040:load_rp(0x%lx)\n", pa));

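	// the 040 URP/SRP must be 512-byte aligned: the registers' low
	// nine bits are reserved, hence the (1 << 9) - 1 check below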
	if (pa & ((1 << 9) - 1)) {
		panic("mmu root pointer misaligned!");
		return B_ERROR;
	}

	// make sure it's empty
	page_directory_entry *pr = (page_directory_entry *)pa;
	for (int32 j = 0; j < NUM_ROOTENT_PER_TBL; j++)
		pr[j] = DFL_ROOTENT_VAL;
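
	// (flushing the ATC before installing the new root pointer keeps
	// stale translations from surviving the switch)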
	/* MC68040 User's Manual, 6-37 */
	/* pflush before... why not after? */
	asm volatile( \
		"pflusha\n" \
		"movec %0,%%srp\n" \
		"movec %0,%%urp\n" \
		: : "d"(pa));

	return B_OK;
}

static status_t
allocate_kernel_pgdirs(void)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	addr_t tbl;
	uint32 i;

	// we'll fill in the 2nd half with ready-made page dirs
	for (i = NUM_ROOTENT_PER_TBL / 2; i < NUM_ROOTENT_PER_TBL; i++) {
		if (i % NUM_DIRTBL_PER_PAGE)
			continue;
		tbl = mmu_get_next_page_tables();
		pr[i] = DT_ROOT | TA_TO_PREA(tbl);
		pd = (page_directory_entry *)tbl;
		for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
			pd[j] = DFL_DIRENT_VAL;
	}
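
	// (pre-building every kernel-side directory presumably lets later
	// address spaces share an identical kernel half of the root table
	// without any further synchronization)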

	return B_OK;
}


static status_t
enable_paging(void)
{
	TRACE(("mmu_040:enable_paging\n"));

	uint16 tcr = 0x8000;	// Enable, 4K page size
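	// TCR bits: 15 = E (translation enable), 14 = P (page size,
	// 0 = 4K, 1 = 8K); 0x8000 therefore turns the MMU on with 4K pages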
	asm volatile("movec %0,%%tcr\n" : : "d"(tcr));

	return B_OK;
}


static status_t
add_page_table(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE(("mmu->add_page_table(base = %p)\n", (void *)virtualAddress));

	// everything is much simpler here because pa = va,
	// thanks to transparent translation
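	// (with 4K pages the 040 walks a three-level tree: 7-bit root
	// index, 7-bit directory index, 6-bit page-table index and a
	// 12-bit page offset)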
	index = VADDR_TO_PRENT(virtualAddress);
	if (PRE_TYPE(pr[index]) != DT_ROOT)
		panic("invalid page root entry %d\n", index);

	// not needed anymore
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
		//TRACE(("missing page root entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			page_root_entry *apr = &pr[aindex + i];
			apr->addr = TA_TO_PREA(tbl);

			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl,
			//	TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));
			//TRACE(("clearing table[%d]\n", i));
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
		}
	}

	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(virtualAddress);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
		//TRACE(("missing page dir entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();

		// for each page table on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			page_directory_entry *apd = &pd[aindex + i];
			pd[aindex + i] = DT_DIR | TA_TO_PDEA(tbl);

			//TRACE(("clearing table[%d]\n", i));
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				pt[j] = DFL_PAGEENT_VAL;
		}
	}

	pt = (page_table_entry *)PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(virtualAddress);
	pt[index].addr = TA_TO_PTEA(0xdeadb00b);
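	// (0xdeadb00b above is a deliberately conspicuous dummy frame
	// address, easy to spot in table dumps while the entry has no
	// real backing)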
	pt[index].supervisor = 1;
	pt[index].type = DT_PAGE;

	return B_OK;
}

static page_table_entry *
lookup_pte(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

	rindex = VADDR_TO_PRENT(virtualAddress);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT)
		panic("lookup_pte: invalid entry pgrt[%d]", rindex);
	pd = (page_directory_entry *)PRE_TO_TA(pr[rindex]);

	dindex = VADDR_TO_PDENT(virtualAddress);
	if (PDE_TYPE(pd[dindex]) != DT_DIR)
		panic("lookup_pte: invalid entry pgrt[%d] prdir[%d]", rindex, dindex);
	pt = (page_table_entry *)PDE_TO_TA(pd[dindex]);

	pindex = VADDR_TO_PTENT(virtualAddress);
#if 0	// of course, it's used in map_page!
	if (PTE_TYPE(pt[pindex]) != DT_PAGE)
		panic("lookup_pte: invalid entry pgrt[%d] prdir[%d] pgtbl[%d]",
			rindex, dindex, pindex);
#endif

	return (&pt[pindex]);
}

static void
unmap_page(addr_t virtualAddress)
{
	page_table_entry *pt;

	TRACE(("mmu->unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);

	// unmap the page from the correct page table
	pt = lookup_pte(virtualAddress);

	if (PTE_TYPE(*pt) != DT_PAGE)
		panic("unmap_page: asked to unmap non-existing page for %08x\n",
			virtualAddress);
	*pt = DT_INVALID | TA_TO_PTEA(0xdeadb00b);
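
	// (pflush with an address operand invalidates only the matching ATC
	// entries; a full pflusha is not needed for a single page)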
	asm volatile("pflush (%0)" : : "a" (virtualAddress));
}

/**	Inserts the physical address into the existing page table. */
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	page_table_entry *pt;

	TRACE(("mmu->map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress));

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	pt = lookup_pte(virtualAddress);

	if (PTE_TYPE(*pt) != DT_INVALID)
		panic("map_page: asked to map existing page for %08x\n",
			virtualAddress);

	TRACE(("map_page: inserting pageTableEntry %p, physicalAddress %p\n",
		pt, (void *)physicalAddress));

	*pt = DT_PAGE
		| TA_TO_PTEA(physicalAddress)
#ifdef MMU_HAS_GLOBAL_PAGES
		| M68K_PTE_GLOBAL
#endif
		| M68K_PTE_SUPERVISOR;
	// XXX: are flags needed? ro? global?

	asm volatile("pflush (%0)" : : "a" (virtualAddress));

	TRACE(("mmu->map_page: done\n"));
}

const struct boot_mmu_ops k040MMUOps = {
	&initialize,
	&set_tt,
	&load_rp,
	&allocate_kernel_pgdirs,
	&enable_paging,
	&add_page_table,
	&unmap_page,
	&map_page
};
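
// (a minimal usage sketch, assuming the generic boot MMU code drives these
// ops in boot order; the "ops" variable and the exact call sequence are
// illustrative, not taken from this file:
//	const struct boot_mmu_ops *ops = &k040MMUOps;
//	ops->initialize();
//	ops->load_rp((addr_t)gPageRoot);
//	ops->allocate_kernel_pgdirs();
//	ops->enable_paging();
// )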