/* Copyright © 1995-2014, The AROS Development Team. All rights reserved. */
6 #include <aros/debug.h>
7 #include <proto/exec.h>
9 #include "kernel_base.h"
10 #include "kernel_intern.h"
/* 68030 (68851), 68040 and 68060 supported, 68030 (68851) is configured like a 68040,
 * no 68030 special features used, not worth the extra complexity */

/* Index widths of the three translation-tree levels (68040-style 7/7/6 layout).
 * NOTE(review): these three defines were lost in this excerpt; the values below
 * are implied by the "7/7/6, 4K page size" comment in enable_mmu030() and by
 * 32 - (7 + 7 + 6) == PAGE_SIZE == 12 — confirm against the pristine file. */
#define LEVELA_SIZE 7
#define LEVELB_SIZE 7
#define LEVELC_SIZE 6
#define PAGE_SIZE 12 // = 1 << 12 = 4096

/* Macros that hopefully make MMU magic a bit easier to understand.. */

/* Extract the level A/B/C table index from a 32-bit virtual address. */
#define LEVELA_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE ))) & ((1 << LEVELA_SIZE) - 1))
#define LEVELB_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE ))) & ((1 << LEVELB_SIZE) - 1))
#define LEVELC_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE + LEVELC_SIZE))) & ((1 << LEVELC_SIZE) - 1))

/* Index into a descriptor table. The B/C forms first mask off the low status
 * bits of the parent descriptor to recover the child table's base address. */
#define LEVELA(root, x) (root[LEVELA_VAL(x)])
#define LEVELB(a, x) (((ULONG*)(((ULONG)a) & ~((1 << (LEVELB_SIZE + 2)) - 1)))[LEVELB_VAL(x)])
#define LEVELC(b, x) (((ULONG*)(((ULONG)b) & ~((1 << (LEVELC_SIZE + 2)) - 1)))[LEVELC_VAL(x)])

/* A descriptor whose low two bits are zero is invalid; the 0xDEAD tag is only
 * there to make stray uses easy to spot in a debugger. */
#define INVALID_DESCRIPTOR 0xDEAD0000
#define ISINVALID(x) ((((ULONG)x) & 3) == 0)
33 static BOOL
map_region2(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
);
36 static void map_pagetable(struct KernelBase
*kb
, void *addr
, ULONG size
)
38 /* 68040+ MMU tables should be serialized */
39 map_region2(kb
, addr
, NULL
, size
, FALSE
, FALSE
, FALSE
, CM_SERIALIZED
);
42 /* Allocate MMU descriptor page, it needs to be (1 << bits) * sizeof(ULONG) aligned */
/* NOTE(review): this excerpt is corrupted — statements are split across lines,
 * the stray leading integers are extraction artifacts, and several original
 * lines are missing (braces, the declarations of desc/dout/i, allocation
 * failure checks, the page_ptr advance and the final return). The comments
 * below describe only the visible logic; restore the pristine file before
 * compiling.
 *
 * Purpose: carve a (1 << bits)-entry descriptor table, aligned to its own
 * byte size, out of the platform data's page pool, initialise every entry to
 * INVALID_DESCRIPTOR, and presumably return the table address tagged with the
 * MMU valid/resident bits (dout) — TODO confirm return path. */
43 static ULONG
alloc_descriptor(struct KernelBase
*kb
, UBYTE mmutype
, UBYTE bits
, UBYTE level
)
45 struct PlatformData
*pd
= kb
->kb_PlatformData
;
/* Table size in bytes; the table must also be aligned to this size. */
47 ULONG size
= sizeof(ULONG
) * (1 << bits
);
48 ULONG ps
= 1 << PAGE_SIZE
;
/* Advance page_ptr in 0x100-byte steps until it is 'size'-aligned,
 * as long as enough pool space remains. */
51 while (pd
->page_free
>= size
&& (((ULONG
)pd
->page_ptr
) & (size
- 1))) {
52 pd
->page_ptr
+= 0x100;
53 pd
->page_free
-= 0x100;
/* Pool too small for the table: refill it with a fresh page-aligned page. */
55 while (pd
->page_free
< size
) {
56 /* allocate in aligned blocks of PAGE_SIZE */
57 UBYTE
*mem
, *newmem
, *pagemem
;
/* Over-allocate two pages so a page-aligned address must lie inside,
 * round up to the page boundary, then claim exactly that page with
 * AllocAbs(). NOTE(review): the failure checks and the release of the
 * 2*ps staging allocation were elided from this excerpt. */
59 mem
= AllocMem(2 * ps
, MEMF_PUBLIC
);
64 newmem
= (UBYTE
*)((((ULONG
)mem
) + ps
- 1) & ~(ps
- 1));
65 pagemem
= AllocAbs(ps
, newmem
);
69 pd
->page_ptr
= pagemem
;
71 // bug("New chunk %p-%p\n", pagemem, pagemem + ps - 1);
/* Intermediate tables (level > 0) on 68040+ must themselves be mapped
 * serialized — see map_pagetable(). */
72 if (level
> 0 && mmutype
>= MMU040
)
73 map_pagetable(kb
, pagemem
, ps
);
/* Carve the table from the (now aligned) pool and invalidate every entry.
 * NOTE(review): declarations of 'desc' and 'i' are missing from this excerpt. */
75 desc
= (ULONG
*)pd
->page_ptr
;
76 for (i
= 0; i
< (1 << bits
); i
++)
77 desc
[i
] = INVALID_DESCRIPTOR
;
/* Tag the low descriptor bits: 68030 marks a valid 4-byte descriptor (2),
 * 68040+ a resident one (3). NOTE(review): the assignment of 'dout' and the
 * 'else' between the two branches are missing from this excerpt. */
79 if (mmutype
== MMU030
)
80 dout
|= 2; /* Valid 4 byte descriptor */
82 dout
|= 3; /* Resident descriptor */
83 // bug("Level%c %p-%p: %08x\n", level + 'A', pd->page_ptr, pd->page_ptr + size - 1, dout);
/* Account for the carved table. NOTE(review): the matching page_ptr advance
 * is not visible in this excerpt. */
85 pd
->page_free
-= size
;
/* Build the root (level A) translation table and record it in PlatformData.
 * On failure the platform's mmu_type is cleared so the rest of the kernel
 * treats the machine as MMU-less. On 68040+ the root table page is remapped
 * serialized (see map_pagetable()).
 * NOTE(review): this excerpt is corrupted — statements are split across
 * lines, leading integers are extraction artifacts, and the braces/return
 * statements are missing. */
89 BOOL
init_mmu(struct KernelBase
*kb
)
91 UBYTE mmutype
= kb
->kb_PlatformData
->mmu_type
;
/* alloc_descriptor() returns the table address with its low status bits
 * set; strip them (& ~3) to get a plain pointer. */
95 kb
->kb_PlatformData
->MMU_Level_A
= (ULONG
*)(alloc_descriptor(kb
, mmutype
, LEVELA_SIZE
, 0) & ~3);
/* Allocation failed: disable MMU support entirely.
 * NOTE(review): the failure-path return is missing from this excerpt. */
96 if (!kb
->kb_PlatformData
->MMU_Level_A
) {
97 kb
->kb_PlatformData
->mmu_type
= 0;
/* 68040+: the root table itself must be mapped non-cacheable/serialized. */
100 if (mmutype
>= MMU040
)
101 map_pagetable(kb
, kb
->kb_PlatformData
->MMU_Level_A
, 1 << PAGE_SIZE
);
/* Enable the 68030 (68851-style) MMU with 'levela' as the translation-tree
 * root. Runs inline asm in supervisor mode; installs bus/address error
 * vectors, programs CRP and TC, and clears the transparent-translation
 * registers.
 * NOTE(review): this excerpt is corrupted — the asm() statement opener,
 * several instructions and the closing braces are missing; the leading
 * integers are extraction artifacts. Comments describe visible lines only. */
105 static void enable_mmu030(ULONG
*levela
)
112 "lea .esuper030(%%pc),%%a5\n"
116 /* Do not interrupt us */
119 /* Disable MMU, setup root pointers,
120 * uses 68040 MMU descriptor levels (7/7/6, 4K page size) */
/* d1 = TC value 0x00c07760 (written to memory, later loaded via pmove). */
121 "move.l #0x00c07760,%%d1\n"
122 "move.l %%d1,%%a7@\n"
124 /* Set bus error exception vector */
/* Vector offsets 8/12 = bus error / address error. */
126 "move.l #addrerror030,%%a5@(12)\n"
127 "move.l #buserror030,%%a5@(8)\n"
128 /* Configure CRP. Valid 4 byte descriptor, other features disabled. */
129 "move.l #0x80000002,%%a7@\n"
130 /* First level descriptor pointer */
131 "move.l %%d0,%%a7@(4)\n"
133 "pmove %%a7@,%%crp\n"
134 /* Set MMU enabled bit */
136 "move.l %%d1,%%a7@\n"
139 /* Clear transparent translation */
141 "pmove %%a7@,%%tt0\n"
142 "pmove %%a7@,%%tt1\n"
/* Inputs: levela (root table); clobbers per the asm constraint list. */
147 : : "m" (levela
) : "d0", "d1", "a4", "a6");
/* Disable the 68030 MMU. NOTE(review): almost the entire asm body was elided
 * from this excerpt; only the supervisor-entry lea and the clobber list
 * remain visible. */
150 static void disable_mmu030(void)
156 "lea .dsuper030(%%pc),%%a5\n"
160 /* Do not interrupt us */
170 : : : "d0", "d1", "a4", "a6");
/* Enable the 68040/68060 MMU with 'levela' as the translation-tree root.
 * cpu060 selects the 68060 bus/address error handlers; zeropagedescriptor
 * is stashed in a fixed vector-table slot (253*4) for the fault handlers.
 * NOTE(review): this excerpt is corrupted — the asm() opener, the branch
 * between the 040 and 060 handler setup, the srp/tc/cache-flush
 * instructions and the closing braces are missing; leading integers are
 * extraction artifacts. Comments describe visible lines only. */
173 static void enable_mmu040(ULONG
*levela
, UBYTE cpu060
, UBYTE
*zeropagedescriptor
)
182 "lea .esuper040(%%pc),%%a5\n"
186 /* Do not interrupt us */
/* Store a1 (presumably zeropagedescriptor — confirm) at vector slot 253. */
189 "move.l %%a1,253*4(%%a5)\n"
/* 68040 fault handlers... */
190 "lea buserror040,%%a6\n"
191 "lea addrerror040,%%a0\n"
/* ...or 68060 fault handlers (selection branch elided in this excerpt). */
194 "lea buserror060,%%a6\n"
195 "lea addrerror060,%%a0\n"
/* Install into vector offsets 8 (bus error) and 12 (address error). */
197 "move.l %%a6,%%a5@(8)\n"
198 "move.l %%a0,%%a5@(12)\n"
200 /* Disable MMU, setup root pointers */
204 /* Flush data caches and ATC */
208 /* Enable MMU, 4K page size */
209 "move.l #0x00008000,%%d0\n"
211 /* Disable transparent translation */
212 "movec %%d1,%%itt0\n"
213 "movec %%d1,%%itt1\n"
214 "movec %%d1,%%dtt0\n"
215 "movec %%d1,%%dtt1\n"
219 : : "m" (levela
), "m" (cpu060
), "m" (zeropagedescriptor
) : "d0", "d1", "a1", "a4", "a6");
/* Disable the 68040/68060 MMU. NOTE(review): almost the entire asm body was
 * elided from this excerpt; only the supervisor-entry lea and the clobber
 * list remain visible. */
222 static void disable_mmu040(void)
228 "lea .dsuper040(%%pc),%%a5\n"
232 /* Do not interrupt us */
241 : : : "d0", "d1", "a4", "a6");
/* Public entry point: activate the MMU using the tree built by init_mmu().
 * No-op when no MMU was detected (mmu_type == 0); otherwise dispatches to
 * the 68030 or 68040/68060 low-level enable routine (MMU060 also takes the
 * 040 path, with the cpu060 flag set).
 * NOTE(review): this excerpt is corrupted — the braces, the guard's
 * 'return;' and the 'else' between the two calls are missing; leading
 * integers are extraction artifacts. */
244 void enable_mmu(struct KernelBase
*kb
)
246 if (!kb
->kb_PlatformData
->mmu_type
)
249 if (kb
->kb_PlatformData
->mmu_type
== MMU030
)
250 enable_mmu030(kb
->kb_PlatformData
->MMU_Level_A
);
252 enable_mmu040(kb
->kb_PlatformData
->MMU_Level_A
, kb
->kb_PlatformData
->mmu_type
== MMU060
, kb
->kb_PlatformData
->zeropagedescriptor
);
255 void disable_mmu(struct KernelBase
*kb
)
257 if (!kb
->kb_PlatformData
->mmu_type
)
259 if (kb
->kb_PlatformData
->mmu_type
== MMU030
)
266 static ULONG
getdesc(struct KernelBase
*kb
, ULONG addr
)
270 desc
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
);
273 desc
= LEVELB(desc
, addr
);
276 desc
= LEVELC(desc
, addr
);
/* Dump the current MMU mapping to the debug log: scans every 4K page of the
 * 32-bit address space, coalesces consecutive pages whose descriptor status
 * bits match, and prints each run with its write-protect, supervisor and
 * cache-mode attributes.
 * NOTE(review): this excerpt is corrupted — statements are split across
 * lines, leading integers are extraction artifacts, and several lines are
 * missing (braces, declarations of mmutype/totalpages/startaddr/i/desc/
 * odesc/cm/sp, the early return, and the run-tracking updates). */
281 void debug_mmu(struct KernelBase
*kb
)
289 ULONG pagemask
= (1 << PAGE_SIZE
) - 1;
291 mmutype
= kb
->kb_PlatformData
->mmu_type
;
/* Nothing to dump without an MMU or a root table. */
292 if (!mmutype
|| kb
->kb_PlatformData
->MMU_Level_A
== NULL
)
294 bug("MMU dump start. Root = %p\n", kb
->kb_PlatformData
->MMU_Level_A
);
295 totalpages
= 1 << (32 - PAGE_SIZE
);
/* Descriptor of the first page starts the first run. */
297 odesc
= getdesc(kb
, startaddr
);
/* <= totalpages: one extra iteration forces the final run to be printed. */
298 for (i
= 0; i
<= totalpages
; i
++) {
299 ULONG addr
= i
<< PAGE_SIZE
;
302 desc
= getdesc(kb
, addr
);
/* Attribute bits changed (or end of scan): report the finished run. */
303 if ((desc
& pagemask
) != (odesc
& pagemask
) || i
== totalpages
) {
/* 68030: cache-inhibit is a single bit (6); 040+: 2-bit cache mode at
 * bits 5-6 plus supervisor bit 7. */
305 if (mmutype
== MMU030
) {
306 cm
= (odesc
>> 6) & 1;
309 cm
= (odesc
>> 5) & 3;
310 sp
= (odesc
>> 7) & 1;
/* Range, physical base (descriptor minus status bits), WP/S/CM flags. */
312 bug("%p - %p: %p WP=%d S=%d CM=%d (%08x)\n",
313 startaddr
, addr
- 1, odesc
& ~((1 << PAGE_SIZE
) - 1),
314 (odesc
& 4) ? 1 : 0, sp
, cm
, odesc
);
319 bug("MMU dump end\n");
/* Worker: map (or invalidate) 'size' bytes of virtual space at 'addr',
 * page by page, to 'physaddr', with the requested write-protect,
 * supervisor and cache-mode attributes. Intermediate level B/C tables are
 * allocated on demand; physaddr == NULL with invalid == FALSE selects a
 * 1:1 mapping (per the callers map_pagetable/unmap_region).
 * NOTE(review): this excerpt is corrupted — statements are split across
 * lines, leading integers are extraction artifacts, and several lines are
 * missing (braces, the per-page for-loop header, failure returns, 'else'
 * branches and the addr increment). Comments describe visible logic only. */
323 static BOOL
map_region2(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
)
325 struct PlatformData
*pd
= kb
->kb_PlatformData
;
326 ULONG desca
, descb
, descc
, pagedescriptor
;
327 ULONG page_size
= 1 << PAGE_SIZE
;
328 ULONG page_mask
= page_size
- 1;
331 mmutype
= pd
->mmu_type
;
/* No translation tree: init_mmu() failed or never ran. */
334 if (kb
->kb_PlatformData
->MMU_Level_A
== NULL
)
/* Everything must be page-aligned: size, virtual and physical address. */
337 if ((size
& page_mask
) || (((ULONG
)addr
) & page_mask
) || (((ULONG
)physaddr
) & page_mask
)) {
338 D(bug("unaligned MMU page request! %p (%p) %08x\n", addr
, physaddr
, size
));
/* physaddr == NULL: handling elided in this excerpt (callers pass NULL
 * for 1:1 and for invalidation) — TODO confirm against pristine source. */
341 if (physaddr
== NULL
)
/* Level A: fetch the level-B table for this address, allocating it
 * (LEVELB_SIZE entries) on first use. */
345 desca
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
);
346 if (ISINVALID(desca
))
347 desca
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
) = alloc_descriptor(kb
, mmutype
, LEVELB_SIZE
, 1);
/* Still invalid: allocation failed (failure return elided in excerpt). */
348 if (ISINVALID(desca
))
/* Level B: same on-demand allocation for the level-C table. */
350 descb
= LEVELB(desca
, addr
);
351 if (ISINVALID(descb
))
352 descb
= LEVELB(desca
, addr
) = alloc_descriptor(kb
, mmutype
, LEVELC_SIZE
, 2);
353 if (ISINVALID(descb
))
/* Level C: the current page descriptor for this address. */
355 descc
= LEVELC(descb
, addr
);
357 if (addr
== 0 && pd
->zeropagedescriptor
== NULL
) {
358 /* special case zero page handling */
/* Remember the byte address of the zero page's descriptor low byte
 * (+3 = last byte of the big-endian ULONG descriptor). */
359 pd
->zeropagedescriptor
= (UBYTE
*)(& LEVELC(descb
, addr
)) + 3;
363 pagedescriptor
= INVALID_DESCRIPTOR
;
/* Mapping exactly the zero page: always write-protected and serialized
 * (030: cache-inhibit bit 6; 040+: CM_SERIALIZED in bits 5-6). */
364 if (addr
== 0 && size
== page_size
) {
365 pagedescriptor
= ((ULONG
)physaddr
) & ~page_mask
;
366 if (mmutype
== MMU030
) {
368 pagedescriptor
|= 1 << 6;
370 pagedescriptor
|= 4; // write-protected
371 pagedescriptor
|= CM_SERIALIZED
<< 5;
/* Normal page: build the descriptor, merging in attributes an existing
 * (valid) descriptor already carries so remapping never weakens them. */
375 BOOL wasinvalid
= ISINVALID(descc
);
376 pagedescriptor
= ((ULONG
)physaddr
) & ~page_mask
;
377 if (mmutype
== MMU030
) {
378 pagedescriptor
|= 1; // page descriptor
379 if (writeprotect
|| (!wasinvalid
&& (descc
& 4)))
380 pagedescriptor
|= 4; // write-protected
381 /* 68030 can only enable or disable caching */
382 if (cachemode
>= CM_SERIALIZED
|| (!wasinvalid
&& (descc
& (1 << 6))))
383 pagedescriptor
|= 1 << 6;
/* 68040/68060 path (else branch; keyword elided in this excerpt). */
385 pagedescriptor
|= 3; // resident page
386 if (writeprotect
|| (!wasinvalid
&& (descc
& 4)))
387 pagedescriptor
|= 4; // write-protected
388 if (supervisor
|| (!wasinvalid
&& (descc
& (1 << 7))))
389 pagedescriptor
|= 1 << 7;
390 // do not override non-cached
/* Keep the stricter (numerically larger) cache mode of old vs new. */
391 if (wasinvalid
|| cachemode
> ((descc
>> 5) & 3))
392 pagedescriptor
|= cachemode
<< 5;
394 pagedescriptor
|= ((descc
>> 5) & 3) << 5;
395 if (addr
!= 0 || size
!= page_size
)
396 pagedescriptor
|= 1 << 10; // global if not zero page
/* Store the finished descriptor and step to the next page.
 * NOTE(review): the addr advance and the enclosing per-page loop are not
 * visible in this excerpt. */
400 LEVELC(descb
, addr
) = pagedescriptor
;
403 physaddr
+= page_size
;
409 BOOL
map_region(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
)
411 D(bug("map_region(%p, %p, %08x, in=%d, wp=%d, s=%d cm=%d\n",
412 addr
, physaddr
, size
, invalid
? 1 : 0, writeprotect
? 1 : 0, supervisor
? 1 : 0, cachemode
));
413 return map_region2(kb
, addr
, physaddr
, size
, invalid
, writeprotect
, supervisor
, cachemode
);
416 BOOL
unmap_region(struct KernelBase
*kb
, void *addr
, ULONG size
)
418 D(bug("unmap_region(%p, %08x)\n", addr
, size
));
419 return map_region2(kb
, addr
, NULL
, size
, TRUE
, FALSE
, FALSE
, 0);