/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
*/
#include <exec/types.h>
#include "kernel_base.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
#include "kernel_intern.h"
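
/*
 * core_SetupMMU builds the boot-time page tables: the RAM area is
 * identity-mapped with 2MB pages, after which the new PML4 is loaded
 * into CR3.
 */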
void core_SetupMMU(struct KernBootPrivate *__KernBootPrivate, IPTR memtop)
{
    unsigned int i, j;
    struct PML4E *PML4;
    struct PDPE  *PDP;
    struct PDE2M *PDE;

    /*
     * How many PDE entries shall be created? Default is 2048 (4GB), unless
     * more RAM is available.
     */
    int pde_page_count = 2048;
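
    /* One PDE maps 2MB: the shift below converts memtop into a count of 2MB pages, rounded up. */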
    /* Does RAM exceed 4GB? Adjust the number of PDE pages. */
    if (((memtop + (1 << 21) - 1) >> 21) > pde_page_count)
        pde_page_count = (memtop + (1 << 21) - 1) >> 21;

    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first %dMB area\n", pde_page_count << 1));

    if (!__KernBootPrivate->PML4)
    {
        /*
         * Allocate MMU pages and directories. Four PDE directories (PDE2M
         * structures) are enough to map the whole 4GB address space; more
         * are allocated when RAM extends beyond 4GB (see pde_page_count).
         */
        __KernBootPrivate->PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
        __KernBootPrivate->PDP  = krnAllocBootMemAligned(sizeof(struct PDPE)  * 512, PAGE_SIZE);
        __KernBootPrivate->PDE  = krnAllocBootMemAligned(sizeof(struct PDE2M) * pde_page_count, PAGE_SIZE);
        __KernBootPrivate->PTE  = krnAllocBootMemAligned(sizeof(struct PTE)   * 512 * 32, PAGE_SIZE);
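
        /*
         * Each table level holds 512 64-bit entries, i.e. exactly one 4K page.
         * The PTE pool provides 32 spare page tables, consumed one at a time
         * when core_ProtPage() later splits a 2MB page into 4K pages.
         */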

        D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p PTE 0x%p\n", __KernBootPrivate->PML4, __KernBootPrivate->PDP, __KernBootPrivate->PDE, __KernBootPrivate->PTE));
    }

    PML4 = __KernBootPrivate->PML4;
    PDP  = __KernBootPrivate->PDP;
    PDE  = __KernBootPrivate->PDE;

    /* PML4 Entry - we need only the first out of 512 entries */
    PML4[0].p   = 1; /* present */
    PML4[0].rw  = 1; /* read/write */
    PML4[0].us  = 1; /* accessible for user */
    PML4[0].pwt = 0; /* write-through cache */
    PML4[0].pcd = 0; /* cache enabled */
    PML4[0].a   = 0; /* not yet accessed */
    PML4[0].mbz = 0; /* must be zero */
    PML4[0].base_low  = (unsigned long)PDP >> 12;
    PML4[0].avl = 0;  /* available for system use */
    PML4[0].nx  = 0;  /* execution allowed */
    PML4[0].base_high = ((unsigned long)PDP >> 32) & 0x000FFFFF;
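
    /*
     * Identity-map the RAM area: one 2MB PDE per 2MB of address space,
     * and one PDP entry (covering 1GB) for every 512 PDEs.
     */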
    for (i = 0; i < pde_page_count; i++)
    {
        /* For every 512th page create the directory entry */
        if ((i % 512) == 0)
        {
            IPTR pdes = (IPTR)&PDE[i];
            int idx = i / 512;

            /* Set the PDP entry up and point to the PDE table */
            PDP[idx].p   = 1;
            PDP[idx].rw  = 1;
            PDP[idx].us  = 1;
            PDP[idx].pwt = 0;
            PDP[idx].pcd = 0;
            PDP[idx].a   = 0;
            PDP[idx].mbz = 0;
            PDP[idx].base_low  = (unsigned long)pdes >> 12;
            PDP[idx].avl = 0;
            PDP[idx].nx  = 0;
            PDP[idx].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;
        }

        /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
        unsigned long base = (((IPTR)i) << 21);

        PDE[i].p   = 1;
        PDE[i].rw  = 1;
        PDE[i].us  = 1;
        PDE[i].pwt = 0;
        PDE[i].pcd = 0;
        PDE[i].a   = 0;
        PDE[i].d   = 0;
        PDE[i].g   = 0;
        PDE[i].pat = 0;
        PDE[i].ps  = 1; /* 2MB page */
        PDE[i].base_low  = base >> 13;
        PDE[i].avl = 0;
        PDE[i].nx  = 0;
        PDE[i].base_high = (base >> 32) & 0x000FFFFF;
    }
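
    /*
     * The fixed loop below re-describes the first 4GB (4 PDP entries, each
     * pointing at 512 2MB PDEs); for the low 4GB it writes the same values
     * as the loop above.
     */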
    /* PDP Entries. There are four of them used in order to define 2048 pages of 2MB each. */
    for (i = 0; i < 4; i++)
    {
        struct PDE2M *pdes = &PDE[512 * i];

        /* Set the PDP entry up and point to the PDE table */
        PDP[i].p   = 1;
        PDP[i].rw  = 1;
        PDP[i].us  = 1;
        PDP[i].pwt = 0;
        PDP[i].pcd = 0;
        PDP[i].a   = 0;
        PDP[i].mbz = 0;
        PDP[i].base_low  = (unsigned long)pdes >> 12;
        PDP[i].avl = 0;
        PDP[i].nx  = 0;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;

        for (j = 0; j < 512; j++)
        {
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            unsigned long base = (i << 30) + (j << 21);

            pdes[j].p   = 1;
            pdes[j].rw  = 1;
            pdes[j].us  = 1;
            pdes[j].pwt = 0;
            pdes[j].pcd = 0;
            pdes[j].a   = 0;
            pdes[j].d   = 0;
            pdes[j].g   = 0;
            pdes[j].pat = 0;
            pdes[j].ps  = 1; /* 2MB page */
            pdes[j].base_low  = base >> 13;
            pdes[j].avl = 0;
            pdes[j].nx  = 0;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;
        }
    }
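
    /* used_page indexes the PTE pool; reset it so core_ProtPage() starts splitting from the first spare page table. */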
    __KernBootPrivate->used_page = 0;

    D(bug("[Kernel] core_SetupMMU: Registering New PML4 @ 0x%p\n", __KernBootPrivate->PML4));
    wrcr(cr3, __KernBootPrivate->PML4);

    D(bug("[Kernel] core_SetupMMU: Done\n"));
}
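
/*
 * core_ProtPage changes the protection of a single 4K page. The virtual
 * address is decoded the way the CPU walks the tables: bits 39-47 index the
 * PML4, bits 30-38 the PDP, bits 21-29 the page directory and bits 12-20 the
 * page table. If the address is still covered by a 2MB page, that page is
 * split into 512 4K PTEs first.
 */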
void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off  = (addr >> 21) & 0x1ff;
    unsigned long pte_off  = (addr >> 12) & 0x1ff;

    struct PML4E *pml4 = __KernBootPrivate->PML4;
    struct PDPE  *pdpe = (struct PDPE *)((pml4[pml4_off].base_low << 12) | ((unsigned long)pml4[pml4_off].base_high << 32));
    struct PDE4K *pde  = (struct PDE4K *)((pdpe[pdpe_off].base_low << 12) | ((unsigned long)pdpe[pdpe_off].base_high << 32));
    struct PTE   *Pages4K = __KernBootPrivate->PTE;
    struct PTE   *pte;

    DMMU(bug("[Kernel] Marking page 0x%p as read-only\n", addr));
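
    /* A set PS bit means this address is covered by a 2MB page; split it into 512 4K PTEs so a single page can be reprotected. */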
    if (pde[pde_off].ps)
    {
        /* Work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[pde_off];
        struct PDE2M *pde2 = (struct PDE2M *)pde;
        intptr_t base = (pde2[pde_off].base_low << 13) | ((unsigned long)pde2[pde_off].base_high << 32);
        int i;

        /* Grab the next spare page table from the boot-time PTE pool */
        pte = &Pages4K[512 * __KernBootPrivate->used_page++];

        D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
        D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));

        for (i = 0; i < 512; i++)
        {
            pte[i].p   = 1;
            pte[i].rw  = pde2[pde_off].rw;
            pte[i].us  = pde2[pde_off].us;
            pte[i].pwt = pde2[pde_off].pwt;
            pte[i].pcd = pde2[pde_off].pcd;
            pte[i].base_low  = base >> 12;
            pte[i].base_high = (base >> 32) & 0x0FFFFF;

            base += PAGE_SIZE;
        }
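
        /* Repoint the PDE at the new page table: clear PS (no longer a 2MB page) and store the table's 4K-aligned address. */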
        tmp_pde.ps = 0;
        tmp_pde.base_low  = (intptr_t)pte >> 12;
        tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;

        pde[pde_off] = tmp_pde;
    }

    pte = (struct PTE *)((pde[pde_off].base_low << 12) | ((unsigned long)pde[pde_off].base_high << 32));

    pte[pte_off].rw = rw ? 1 : 0;
    pte[pte_off].us = us ? 1 : 0;
    pte[pte_off].p  = p ? 1 : 0;
    asm volatile ("invlpg (%0)"::"r"(addr)); /* flush the stale TLB entry for this page */
}
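
/*
 * core_ProtKernelArea applies core_ProtPage() to every 4K page of the given
 * range. Hypothetical usage sketch (address and size made up): write-protect
 * 64K of kernel code at 0x200000, keeping it present and supervisor-only:
 *
 *     core_ProtKernelArea(0x200000, 0x10000, 1, 0, 0);
 */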
void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr   += 4096;
        length -= 4096;
    }
}