arch/m68k-all/kernel/mmu.c
/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <aros/debug.h>
#include <proto/exec.h>

#include "kernel_base.h"
#include "kernel_intern.h"
/* The 68030 (68851), 68040 and 68060 are supported. The 68030 (68851) is
 * configured like a 68040; no 68030-specific features are used, as they are
 * not worth the extra complexity. */

#define LEVELA_SIZE 7
#define LEVELB_SIZE 7
#define LEVELC_SIZE 6
#define PAGE_SIZE 12 // page shift: 1 << 12 = 4096 bytes
/* Macros that hopefully make the MMU magic a bit easier to understand... */

#define LEVELA_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE                            ))) & ((1 << LEVELA_SIZE) - 1))
#define LEVELB_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE              ))) & ((1 << LEVELB_SIZE) - 1))
#define LEVELC_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE + LEVELC_SIZE))) & ((1 << LEVELC_SIZE) - 1))

#define LEVELA(root, x) (root[LEVELA_VAL(x)])
#define LEVELB(a, x) (((ULONG*)(((ULONG)a) & ~((1 << (LEVELB_SIZE + 2)) - 1)))[LEVELB_VAL(x)])
#define LEVELC(b, x) (((ULONG*)(((ULONG)b) & ~((1 << (LEVELC_SIZE + 2)) - 1)))[LEVELC_VAL(x)])
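/*
 * Worked example (illustration, not part of the original source): with the
 * 7/7/6/12 bit split above, a 32-bit address decomposes as
 *
 *   addr = 0x12345678
 *   LEVELA_VAL(addr) = addr >> 25          = 0x09  (top 7 bits, root index)
 *   LEVELB_VAL(addr) = (addr >> 18) & 0x7F = 0x0D  (next 7 bits)
 *   LEVELC_VAL(addr) = (addr >> 12) & 0x3F = 0x05  (next 6 bits)
 *   page offset      = addr & 0xFFF        = 0x678  (low 12 bits)
 *
 * 7 + 7 + 6 + 12 = 32, so every address maps to exactly one level-C slot.
 */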
#define INVALID_DESCRIPTOR 0xDEAD0000
#define ISINVALID(x) ((((ULONG)x) & 3) == 0)
static BOOL map_region2(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode);
static void map_pagetable(struct KernelBase *kb, void *addr, ULONG size)
{
    /* 68040+ MMU tables should be serialized */
    map_region2(kb, addr, NULL, size, FALSE, FALSE, FALSE, CM_SERIALIZED);
}
/* Allocate an MMU descriptor page; it needs to be (1 << bits) * sizeof(ULONG) aligned */
static ULONG alloc_descriptor(struct KernelBase *kb, UBYTE mmutype, UBYTE bits, UBYTE level)
{
    struct PlatformData *pd = kb->kb_PlatformData;
    ULONG *desc, dout;
    ULONG size = sizeof(ULONG) * (1 << bits);
    ULONG ps = 1 << PAGE_SIZE;
    UWORD i;

    while (pd->page_free >= size && (((ULONG)pd->page_ptr) & (size - 1))) {
        pd->page_ptr += 0x100;
        pd->page_free -= 0x100;
    }
    while (pd->page_free < size) {
        /* allocate in aligned blocks of PAGE_SIZE */
        UBYTE *mem, *newmem, *pagemem;

        mem = AllocMem(2 * ps, MEMF_PUBLIC);
        if (!mem)
            return 0;
        Forbid();
        FreeMem(mem, 2 * ps);
        newmem = (UBYTE*)((((ULONG)mem) + ps - 1) & ~(ps - 1));
        pagemem = AllocAbs(ps, newmem);
        Permit();
        if (!pagemem)
            return 0;
        pd->page_ptr = pagemem;
        pd->page_free = ps;
        // bug("New chunk %p-%p\n", pagemem, pagemem + ps - 1);
        if (level > 0 && mmutype >= MMU040)
            map_pagetable(kb, pagemem, ps);
    }
    desc = (ULONG*)pd->page_ptr;
    for (i = 0; i < (1 << bits); i++)
        desc[i] = INVALID_DESCRIPTOR;
    dout = (ULONG)desc;
    if (mmutype == MMU030)
        dout |= 2; /* Valid 4 byte descriptor */
    else
        dout |= 3; /* Resident descriptor */
    // bug("Level%c %p-%p: %08x\n", level + 'A', pd->page_ptr, pd->page_ptr + size - 1, dout);
    pd->page_ptr += size;
    pd->page_free -= size;
    return dout;
}
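/*
 * Sizing example (illustration, derived from the code above): a level-B
 * table has 1 << LEVELB_SIZE = 128 entries, so alloc_descriptor() carves
 * 128 * sizeof(ULONG) = 512 bytes out of the current 4K chunk, stepping
 * page_ptr forward in 0x100 increments until the 512-byte alignment holds.
 * The returned value is the table address with the descriptor-type bits
 * already ORed in (2 on 68030, 3 on 68040/060), which is why callers mask
 * with ~3 to recover the raw pointer.
 */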
BOOL init_mmu(struct KernelBase *kb)
{
    UBYTE mmutype = kb->kb_PlatformData->mmu_type;

    if (!mmutype)
        return FALSE;
    kb->kb_PlatformData->MMU_Level_A = (ULONG*)(alloc_descriptor(kb, mmutype, LEVELA_SIZE, 0) & ~3);
    if (!kb->kb_PlatformData->MMU_Level_A) {
        kb->kb_PlatformData->mmu_type = 0;
        return FALSE;
    }
    if (mmutype >= MMU040)
        map_pagetable(kb, kb->kb_PlatformData->MMU_Level_A, 1 << PAGE_SIZE);
    return TRUE;
}
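/*
 * Usage sketch (hypothetical caller, not from this file): the expected boot
 * order is to build the root table first, populate mappings, then switch the
 * MMU on, e.g.
 *
 *   if (init_mmu(kb)) {
 *       map_region(kb, base, NULL, size, FALSE, TRUE, FALSE, CM_SERIALIZED);
 *       enable_mmu(kb);
 *   }
 *
 * where base/size and the chosen cache mode are made-up values for
 * illustration only.
 */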
static void enable_mmu030(ULONG *levela)
{
    asm volatile (
        ".chip 68030\n"
        "move.l %%a5,%%a4\n"
        "move.l %0,%%d0\n"
        "move.l 4.w,%%a6\n"
        "lea .esuper030(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".esuper030:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        "subq.l #8,%%a7\n"
        /* Disable MMU, setup root pointers,
         * uses 68040 MMU descriptor levels (7/7/6, 4K page size) */
        "move.l #0x00c07760,%%d1\n"
        "move.l %%d1,%%a7@\n"
        "pmove %%a7@,%%tc\n"
        /* Set bus error exception vector */
        "movec %%vbr,%%a5\n"
        "move.l #addrerror030,%%a5@(12)\n"
        "move.l #buserror030,%%a5@(8)\n"
        /* Configure CRP. Valid 4 byte descriptor, other features disabled. */
        "move.l #0x80000002,%%a7@\n"
        /* First level descriptor pointer */
        "move.l %%d0,%%a7@(4)\n"
        /* Set CRP */
        "pmove %%a7@,%%crp\n"
        /* Set MMU enabled bit */
        "bset #31,%%d1\n"
        "move.l %%d1,%%a7@\n"
        /* MMU on! */
        "pmove %%a7@,%%tc\n"
        /* Clear transparent translation */
        "clr.l %%a7@\n"
        "pmove %%a7@,%%tt0\n"
        "pmove %%a7@,%%tt1\n"
        "addq.l #8,%%a7\n"
        "rte\n"
        "0:\n"
        "move.l %%a4,%%a5\n"
        : : "m" (levela) : "d0", "d1", "a4", "a6");
}
static void disable_mmu030(void)
{
    asm volatile (
        ".chip 68030\n"
        "move.l %%a5,%%a4\n"
        "move.l 4.w,%%a6\n"
        "lea .dsuper030(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".dsuper030:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Disable MMU */
        "subq.l #4,%%a7\n"
        "clr.l %%a7@\n"
        "pmove %%a7@,%%tc\n"
        "addq.l #4,%%a7\n"
        "rte\n"
        "0:\n"
        "move.l %%a4,%%a5\n"
        : : : "d0", "d1", "a4", "a6");
}
static void enable_mmu040(ULONG *levela, UBYTE cpu060, UBYTE *zeropagedescriptor)
{
    asm volatile (
        ".chip 68060\n"
        "move.l %%a5,%%a4\n"
        "move.l %0,%%d0\n"
        "move.b %1,%%d1\n"
        "move.l %2,%%a1\n"
        "move.l 4.w,%%a6\n"
        "lea .esuper040(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".esuper040:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        "movec %%vbr,%%a5\n"
        "move.l %%a1,253*4(%%a5)\n"
        "lea buserror040,%%a6\n"
        "lea addrerror040,%%a0\n"
        "tst.b %%d1\n"
        "beq.s .cpu040\n"
        "lea buserror060,%%a6\n"
        "lea addrerror060,%%a0\n"
        ".cpu040:\n"
        "move.l %%a6,%%a5@(8)\n"
        "move.l %%a0,%%a5@(12)\n"
        "moveq #0,%%d1\n"
        /* Disable MMU, setup root pointers */
        "movec %%d1,%%tc\n"
        "movec %%d0,%%urp\n"
        "movec %%d0,%%srp\n"
        /* Flush data caches and ATC */
        "cpusha %%dc\n"
        "cinva %%dc\n"
        "pflusha\n"
        /* Enable MMU, 4K page size */
        "move.l #0x00008000,%%d0\n"
        "movec %%d0,%%tc\n"
        /* Disable transparent translation */
        "movec %%d1,%%itt0\n"
        "movec %%d1,%%itt1\n"
        "movec %%d1,%%dtt0\n"
        "movec %%d1,%%dtt1\n"
        "rte\n"
        "0:\n"
        "move.l %%a4,%%a5\n"
        : : "m" (levela), "m" (cpu060), "m" (zeropagedescriptor) : "d0", "d1", "a1", "a4", "a6");
}
static void disable_mmu040(void)
{
    asm volatile (
        ".chip 68060\n"
        "move.l %%a5,%%a4\n"
        "move.l 4.w,%%a6\n"
        "lea .dsuper040(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".dsuper040:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Disable MMU */
        "moveq #0,%%d0\n"
        "movec %%d0,%%tc\n"
        "pflusha\n"
        "rte\n"
        "0:\n"
        "move.l %%a4,%%a5\n"
        : : : "d0", "d1", "a4", "a6");
}
void enable_mmu(struct KernelBase *kb)
{
    if (!kb->kb_PlatformData->mmu_type)
        return;

    if (kb->kb_PlatformData->mmu_type == MMU030)
        enable_mmu030(kb->kb_PlatformData->MMU_Level_A);
    else
        enable_mmu040(kb->kb_PlatformData->MMU_Level_A, kb->kb_PlatformData->mmu_type == MMU060, kb->kb_PlatformData->zeropagedescriptor);
}

void disable_mmu(struct KernelBase *kb)
{
    if (!kb->kb_PlatformData->mmu_type)
        return;
    if (kb->kb_PlatformData->mmu_type == MMU030)
        disable_mmu030();
    else
        disable_mmu040();
}
#if DEBUG
static ULONG getdesc(struct KernelBase *kb, ULONG addr)
{
    ULONG desc;

    desc = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr);
    if (ISINVALID(desc))
        return desc;
    desc = LEVELB(desc, addr);
    if (ISINVALID(desc))
        return desc;
    desc = LEVELC(desc, addr);
    return desc;
}
#endif
void debug_mmu(struct KernelBase *kb)
{
#if DEBUG
    UBYTE mmutype;
    ULONG i;
    ULONG startaddr;
    ULONG odesc;
    ULONG totalpages;
    ULONG pagemask = (1 << PAGE_SIZE) - 1;

    mmutype = kb->kb_PlatformData->mmu_type;
    if (!mmutype || kb->kb_PlatformData->MMU_Level_A == NULL)
        return;
    bug("MMU dump start. Root = %p\n", kb->kb_PlatformData->MMU_Level_A);
    totalpages = 1 << (32 - PAGE_SIZE);
    startaddr = 0;
    odesc = getdesc(kb, startaddr);
    for (i = 0; i <= totalpages; i++) {
        ULONG addr = i << PAGE_SIZE;
        ULONG desc = 0;
        if (i < totalpages)
            desc = getdesc(kb, addr);
        if ((desc & pagemask) != (odesc & pagemask) || i == totalpages) {
            UBYTE cm, sp;
            if (mmutype == MMU030) {
                cm = (odesc >> 6) & 1;
                sp = 0;
            } else {
                cm = (odesc >> 5) & 3;
                sp = (odesc >> 7) & 1;
            }
            bug("%p - %p: %p WP=%d S=%d CM=%d (%08x)\n",
                startaddr, addr - 1, odesc & ~((1 << PAGE_SIZE) - 1),
                (odesc & 4) ? 1 : 0, sp, cm, odesc);
            startaddr = addr;
            odesc = desc;
        }
    }
    bug("MMU dump end\n");
#endif
}
static BOOL map_region2(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode)
{
    struct PlatformData *pd = kb->kb_PlatformData;
    ULONG desca, descb, descc, pagedescriptor;
    ULONG page_size = 1 << PAGE_SIZE;
    ULONG page_mask = page_size - 1;
    UBYTE mmutype;

    mmutype = pd->mmu_type;
    if (!mmutype)
        return FALSE;
    if (kb->kb_PlatformData->MMU_Level_A == NULL)
        return FALSE;

    if ((size & page_mask) || (((ULONG)addr) & page_mask) || (((ULONG)physaddr) & page_mask)) {
        D(bug("unaligned MMU page request! %p (%p) %08x\n", addr, physaddr, size));
        return FALSE;
    }
    if (physaddr == NULL)
        physaddr = addr;

    while (size) {
        desca = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr);
        if (ISINVALID(desca))
            desca = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr) = alloc_descriptor(kb, mmutype, LEVELB_SIZE, 1);
        if (ISINVALID(desca))
            return FALSE;
        descb = LEVELB(desca, addr);
        if (ISINVALID(descb))
            descb = LEVELB(desca, addr) = alloc_descriptor(kb, mmutype, LEVELC_SIZE, 2);
        if (ISINVALID(descb))
            return FALSE;
        descc = LEVELC(descb, addr);

        if (addr == 0 && pd->zeropagedescriptor == NULL) {
            /* special case zero page handling */
            pd->zeropagedescriptor = (UBYTE*)(& LEVELC(descb, addr)) + 3;
        }

        if (invalid) {
            pagedescriptor = INVALID_DESCRIPTOR;
            if (addr == 0 && size == page_size) {
                pagedescriptor = ((ULONG)physaddr) & ~page_mask;
                if (mmutype == MMU030) {
                    pagedescriptor |= 4;
                    pagedescriptor |= 1 << 6;
                } else {
                    pagedescriptor |= 4; // write-protected
                    pagedescriptor |= CM_SERIALIZED << 5;
                }
            }
        } else {
            BOOL wasinvalid = ISINVALID(descc);
            pagedescriptor = ((ULONG)physaddr) & ~page_mask;
            if (mmutype == MMU030) {
                pagedescriptor |= 1; // page descriptor
                if (writeprotect || (!wasinvalid && (descc & 4)))
                    pagedescriptor |= 4; // write-protected
                /* 68030 can only enable or disable caching */
                if (cachemode >= CM_SERIALIZED || (!wasinvalid && (descc & (1 << 6))))
                    pagedescriptor |= 1 << 6;
            } else {
                pagedescriptor |= 3; // resident page
                if (writeprotect || (!wasinvalid && (descc & 4)))
                    pagedescriptor |= 4; // write-protected
                if (supervisor || (!wasinvalid && (descc & (1 << 7))))
                    pagedescriptor |= 1 << 7;
                // do not override non-cached
                if (wasinvalid || cachemode > ((descc >> 5) & 3))
                    pagedescriptor |= cachemode << 5;
                else
                    pagedescriptor |= ((descc >> 5) & 3) << 5;
                if (addr != 0 || size != page_size)
                    pagedescriptor |= 1 << 10; // global if not zero page
            }
        }

        LEVELC(descb, addr) = pagedescriptor;
        size -= page_size;
        addr += page_size;
        physaddr += page_size;
    }

    return TRUE;
}
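/*
 * Page descriptor bit usage on 68040/060, as assembled above (summary added
 * for illustration):
 *
 *   bits 0-1:   3 = resident page (0 = invalid)
 *   bit 2:      write-protect
 *   bits 5-6:   cache mode (cachemode << 5, CM_SERIALIZED etc.)
 *   bit 7:      supervisor-only
 *   bit 10:     global (set for everything except the zero page here)
 *   bits 12-31: physical page address (PAGE_SIZE-aligned)
 *
 * The 68030 variant only uses bit 0 (page descriptor), bit 2
 * (write-protect) and bit 6 (cache inhibit).
 */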
BOOL map_region(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode)
{
    D(bug("map_region(%p, %p, %08x, in=%d, wp=%d, s=%d, cm=%d)\n",
        addr, physaddr, size, invalid ? 1 : 0, writeprotect ? 1 : 0, supervisor ? 1 : 0, cachemode));
    return map_region2(kb, addr, physaddr, size, invalid, writeprotect, supervisor, cachemode);
}
BOOL unmap_region(struct KernelBase *kb, void *addr, ULONG size)
{
    D(bug("unmap_region(%p, %08x)\n", addr, size));
    return map_region2(kb, addr, NULL, size, TRUE, FALSE, FALSE, 0);
}
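/*
 * Usage sketch (hypothetical, values for illustration only): remapping one
 * 4K page read-only and then unmapping it again:
 *
 *   map_region(kb, (void*)0x00F80000, NULL, 1 << PAGE_SIZE,
 *              FALSE, TRUE, FALSE, CM_SERIALIZED);
 *   unmap_region(kb, (void*)0x00F80000, 1 << PAGE_SIZE);
 *
 * Note that unmap_region() does not free the level-B/C tables; it just
 * rewrites the level-C entries with INVALID_DESCRIPTOR via
 * map_region2(..., TRUE, ...).
 */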