src/system/boot/arch/m68k/mmu_040.cpp

/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */

#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>

#include "arch_040_mmu.h"

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

extern page_root_entry *gPageRoot;

// XXX: the Milan BIOS uses the MMU for itself,
// likely to virtualize missing Atari I/O ports...
// tcr:  c000     (enabled, 8k pages :()
// dtt0: 803fe140 0x80000000 & ~0x3f... en ignFC2 U=1 CI,S RW
// dtt1: 403fe060 0x40000000 & ~0x3f... en ignFC2 U=0 CI,NS RW
// itt0: 803fe040 0x80000000 & ~0x3f... en ignFC2 U=0 CI,S RW
// itt1: 403fe000 0x40000000 & ~0x3f... en ignFC2 U=0 C,WT RW
// srp:  00dfde00
// urp:  00dfde00
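//
// As a reading aid (from the MC68040 user's manual, not from the BIOS
// dump itself), the '040 transparent translation register layout the
// values above decode against:
//   bits 31-24  logical address base (compared with the top 8 address bits)
//   bits 23-16  logical address mask (a set bit ignores that base bit)
//   bit  15     E: enable
//   bits 14-13  S: 00 user, 01 supervisor, 1x ignore FC2 (match both)
//   bits  9-8   U1/U0: user page attribute signals
//   bits  6-5   CM: 00 cachable write-through, 01 cachable copyback,
//               10 noncachable serialized, 11 noncachable
//   bit   2     W: write protect
// e.g. dtt0 = 803fe140: base 0x80, mask 0x3f (a 1GB window at 0x80000000),
// E=1, S=1x (ignore FC2), U=01, CM=10 (cache-inhibited serialized), W=0.
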
static void
dump_mmu(void)
{
	uint32 dttr0, dttr1;
	uint32 ittr0, ittr1;
	uint32 srp, urp;
	uint32 tcr;

	TRACE(("mmu_040:dump:\n"));

	asm volatile("movec %%tcr,%0\n" : "=d"(tcr) :);
	TRACE(("tcr:\t%08lx\n", tcr));

	asm volatile("movec %%dtt0,%0\n" : "=d"(dttr0) :);
	TRACE(("dtt0:\t%08lx\n", dttr0));
	asm volatile("movec %%dtt1,%0\n" : "=d"(dttr1) :);
	TRACE(("dtt1:\t%08lx\n", dttr1));

	asm volatile("movec %%itt0,%0\n" : "=d"(ittr0) :);
	TRACE(("itt0:\t%08lx\n", ittr0));
	asm volatile("movec %%itt1,%0\n" : "=d"(ittr1) :);
	TRACE(("itt1:\t%08lx\n", ittr1));

	asm volatile("movec %%srp,%0\n" : "=d"(srp) :);
	TRACE(("srp:\t%08lx\n", srp));
	asm volatile("movec %%urp,%0\n" : "=d"(urp) :);
	TRACE(("urp:\t%08lx\n", urp));

	TRACE(("mmu_040:dump: done\n"));
}


static void
initialize(void)
{
	dump_mmu();
	TRACE(("mmu_040:initialize\n"));
}


static status_t
set_tt(int which, addr_t pa, size_t len, uint32 perms /* NOTUSED */)
{
	TRACE(("mmu_040:set_tt(%d, 0x%lx, 0x%lx, 0x%08lx)\n", which, pa, len, perms));
	uint32 mask;
	uint32 ttr = 0;
	mask = 0x0000ffff;
	if (len) {
		len = (len >> 24) & 0x00ff;
		while (len >>= 1)
			mask <<= 1;
		// enable, supervisor only, upa=0,
		// cachable write-through, read/write
		ttr = 0x0a000;
		ttr |= (pa & 0xff000000);
		ttr |= (mask & 0x00ff0000);
	}
	TRACE(("mmu_040:set_tt: 0x%08lx\n", ttr));

	switch (which) {
		case 0:
			asm volatile(
				"movec %0,%%dtt0\n"
				"movec %0,%%itt0\n"
				: : "d"(ttr));
			break;
		case 1:
			asm volatile(
				"movec %0,%%dtt1\n"
				"movec %0,%%itt1\n"
				: : "d"(ttr));
			break;
		default:
			return EINVAL;
	}
	return B_OK;
}
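
// A worked example of the mask computation above (illustrative only):
// set_tt(0, 0x80000000, 0x40000000, 0) gives len = 0x40 after the shift,
// so the while loop runs six times and mask becomes
// 0xffff << 6 = 0x003fffc0; ttr is then
//   0x0a000 | 0x80000000 | 0x003f0000 = 0x803fa000
// i.e. base 0x80, mask 0x3f: a transparent 1GB supervisor window at
// 0x80000000, matching the base/mask the Milan BIOS dump at the top of
// this file shows for its dtt0.
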

static status_t
load_rp(addr_t pa)
{
	TRACE(("mmu_040:load_rp(0x%lx)\n", pa));
	// sanity check: the '040 root pointer registers require
	// 512 byte alignment
	if (pa & ((1 << 9) - 1)) {
		panic("mmu root pointer misaligned!");
		return EINVAL;
	}
	// make sure it's empty
	page_root_entry *pr = (page_root_entry *)pa;
	for (int32 j = 0; j < NUM_ROOTENT_PER_TBL; j++)
		pr[j] = DFL_ROOTENT_VAL;

	/* mc68040 user's manual, 6-37 */
	/* pflush before... why not after? */
	asm volatile(
		"pflusha\n"
		"movec %0,%%srp\n"
		"movec %0,%%urp\n"
		"pflusha\n"
		: : "d"(pa));
	return B_OK;
}
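
// Note that both the supervisor (srp) and user (urp) root pointers are
// loaded with the same table, so supervisor and user accesses walk one
// and the same translation tree while the loader runs.
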

static status_t
allocate_kernel_pgdirs(void)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	addr_t tbl = 0;
	int i;

	// we'll fill in the 2nd half with ready made page dirs
	for (i = NUM_ROOTENT_PER_TBL / 2; i < NUM_ROOTENT_PER_TBL; i++) {
		if (i % NUM_DIRTBL_PER_PAGE)
			tbl += SIZ_DIRTBL;
		else
			tbl = mmu_get_next_page_tables();
		pr[i] = DT_ROOT | TA_TO_PREA(tbl);
		pd = (page_directory_entry *)tbl;
		for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
			pd[j] = DFL_DIRENT_VAL;
	}
	return B_OK;
}
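
// Why the modulo dance above: directory tables are smaller than a page,
// so one call to mmu_get_next_page_tables() hands out a whole page that
// is then carved into NUM_DIRTBL_PER_PAGE consecutive tables of
// SIZ_DIRTBL bytes each (with 4K pages and 128 4-byte entries per table,
// that would be 4096 / 512 = 8 directories per page; the authoritative
// values are the constants from the MMU headers).
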

static status_t
enable_paging(void)
{
	TRACE(("mmu_040:enable_paging\n"));
	uint16 tcr = 0x8000;	// enable, 4K page size
	asm volatile(
		"pflusha\n"
		"movec %0,%%tcr\n"
		"pflusha\n"
		: : "d"(tcr));
	return B_OK;
}
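
// The '040 TCR only has two interesting bits: bit 15 (E) enables
// translation and bit 14 (P) selects the page size, 0 = 4K, 1 = 8K.
// 0x8000 therefore means "enabled, 4K pages"; the Milan BIOS value
// 0xc000 quoted at the top of this file is "enabled, 8K pages".
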

static status_t
add_page_table(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE(("mmu->add_page_table(base = %p)\n", (void *)virtualAddress));

	// everything is much simpler here because pa = va
	// thanks to transparent translation

	index = VADDR_TO_PRENT(virtualAddress);
	if (PRE_TYPE(pr[index]) != DT_ROOT)
		panic("invalid page root entry %d\n", index);
#if 0
	// not needed anymore
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
		//TRACE(("missing page root entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();
		if (!tbl)
			return ENOMEM;
		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			page_root_entry *apr = &pr[aindex + i];
			apr->addr = TA_TO_PREA(tbl);
			apr->type = DT_ROOT;
			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));
			// clear the table
			//TRACE(("clearing table[%d]\n", i));
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
			tbl += SIZ_DIRTBL;
		}
	}
#endif
	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(virtualAddress);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
		//TRACE(("missing page dir entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();
		if (!tbl)
			return ENOMEM;
		// for each page table on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			page_directory_entry *apd = &pd[aindex + i];
			*apd = DT_DIR | TA_TO_PDEA(tbl);
			// clear the table
			//TRACE(("clearing table[%d]\n", i));
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				pt[j] = DFL_PAGEENT_VAL;
			tbl += SIZ_PAGETBL;
		}
	}
#if 0
	pt = (page_table_entry *)PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(virtualAddress);
	pt[index].addr = TA_TO_PTEA(0xdeadb00b);
	pt[index].supervisor = 1;
	pt[index].type = DT_PAGE;
#endif
	return B_OK;
}
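
// The "pa = va" shortcut above relies on set_tt() having opened
// transparent windows over the RAM the loader allocates tables from:
// a table's physical address is directly usable as a pointer, so no
// temporary mappings are needed to initialize freshly allocated
// directories and page tables.
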

static page_table_entry *
lookup_pte(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

	rindex = VADDR_TO_PRENT(virtualAddress);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT)
		panic("lookup_pte: invalid entry pgrt[%d]", rindex);
	pd = (page_directory_entry *)PRE_TO_TA(pr[rindex]);

	dindex = VADDR_TO_PDENT(virtualAddress);
	if (PDE_TYPE(pd[dindex]) != DT_DIR)
		panic("lookup_pte: invalid entry pgrt[%d] pgdir[%d]", rindex, dindex);
	pt = (page_table_entry *)PDE_TO_TA(pd[dindex]);

	pindex = VADDR_TO_PTENT(virtualAddress);
#if 0	// no DT_PAGE check here: map_page() looks up entries that are still invalid!
	if (PTE_TYPE(pt[pindex]) != DT_PAGE)
		panic("lookup_pte: invalid entry pgrt[%d] pgdir[%d] pgtbl[%d]",
			rindex, dindex, pindex);
#endif

	return &pt[pindex];
}
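
// How a 32 bit address is carved up for the walk above, assuming the
// usual '040 4K-page layout behind the VADDR_TO_*ENT() macros:
//   bits 31-25  root index      (7 bits, 128 root entries)
//   bits 24-18  directory index (7 bits, 128 entries per directory)
//   bits 17-12  page index      (6 bits, 64 entries per page table)
//   bits 11-0   offset within the 4K page
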

static void
unmap_page(addr_t virtualAddress)
{
	page_table_entry *pt;

	TRACE(("mmu->unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);

	// unmap the page from the correct page table
	pt = lookup_pte(virtualAddress);

	if (PTE_TYPE(*pt) != DT_PAGE)
		panic("unmap_page: asked to unmap non-existing page for %08lx\n",
			virtualAddress);

	*pt = DT_INVALID | TA_TO_PTEA(0xdeadb00b);

	// flush the ATC entry for this address
	asm volatile("pflush (%0)" : : "a" (virtualAddress));
}


/** Inserts the physical address into an existing page table. */
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	page_table_entry *pt;

	TRACE(("mmu->map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress));

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	pt = lookup_pte(virtualAddress);

	if (PTE_TYPE(*pt) != DT_INVALID)
		panic("map_page: asked to map existing page for %08lx\n",
			virtualAddress);

	TRACE(("map_page: inserting pageTableEntry %p, physicalAddress %p\n",
		pt, (void *)physicalAddress));

	*pt = DT_PAGE
		| TA_TO_PTEA(physicalAddress)
#ifdef MMU_HAS_GLOBAL_PAGES
		| M68K_PTE_GLOBAL
#endif
		| M68K_PTE_SUPERVISOR;
	// XXX: are flags needed? ro? global?

	// flush the ATC entry for this address
	asm volatile("pflush (%0)" : : "a" (virtualAddress));

	TRACE(("mmu->map_page: done\n"));
}
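
// Note that the "flags" argument is currently ignored: during the boot
// stage every page is simply mapped supervisor read/write (plus global
// where the build enables it); the exact scalar encodings of DT_PAGE,
// M68K_PTE_SUPERVISOR and M68K_PTE_GLOBAL come from the MMU headers.
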

const struct boot_mmu_ops k040MMUOps = {
	&initialize,
	&set_tt,
	&load_rp,
	&allocate_kernel_pgdirs,
	&enable_paging,
	&add_page_table,
	&unmap_page,
	&map_page
};
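
// A sketch of the sequence the generic loader MMU code is expected to
// drive through these ops (hedged: the actual call sites live elsewhere
// in the loader, not in this file): initialize the MMU state, open
// transparent translation windows with set_tt(), point srp/urp at the
// root table via load_rp(), pre-fill the kernel half of the root table
// with allocate_kernel_pgdirs(), then enable_paging(); add_page_table(),
// map_page() and unmap_page() service individual mappings afterwards.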