/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
*/
#include <exec/types.h>

#include "kernel_base.h"
#include "kernel_intern.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
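/*
 * x86-64 long mode uses a four-level page-table hierarchy, and the code
 * below maintains one table of each kind:
 *
 *   PML4 (virtual address bits 47-39) -> PDP (bits 38-30)
 *     -> PDE (bits 29-21) -> PTE (bits 20-12)
 *
 * Memory is normally mapped with 2MB pages (PS bit set in the PDE), so the
 * PTE level is only materialized on demand, when core_ProtPage() has to
 * give a single 4K page different protection bits.
 */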
void core_InitMMU(struct CPUMMUConfig *MMU)
{
    struct PML4E *PML4 = MMU->mmu_PML4;
    struct PDPE  *PDP  = MMU->mmu_PDP;
    struct PDE2M *PDE  = MMU->mmu_PDE;
    int i, j;
    /* PML4 Entry - we need only the first of its 512 entries (it covers the low 512GB) */
    PML4[0].p   = 1; /* present */
    PML4[0].rw  = 1; /* read/write */
    PML4[0].us  = 1; /* accessible for user */
    PML4[0].pwt = 0; /* no write-through caching */
    PML4[0].pcd = 0; /* cache enabled */
    PML4[0].a   = 0; /* not yet accessed */
    PML4[0].mbz = 0; /* must be zero */
    PML4[0].base_low  = ((IPTR)PDP) >> 12;
    PML4[0].base_high = (((IPTR)PDP) >> 32) & 0x000FFFFF;
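    /*
     * The physical address of the next-level table is stored split across
     * two bit fields: bits 12-31 of the address go into base_low, bits
     * 32-51 into base_high (field widths implied by the masks used here).
     * E.g. a PDP at physical 0x0000000140003000 gives base_low = 0x40003
     * and base_high = 0x1.
     */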
    for (i = 0; i < MMU->mmu_PDEPageCount; i++)
    {
        /* For every 512th page create the directory entry */
        if ((i % 512) == 0)
        {
            IPTR pdes = (IPTR)&PDE[i];
            int idx = i / 512;
            /* Set the PDP entry up and point to the PDE table */
            PDP[idx].base_low  = pdes >> 12;
            PDP[idx].base_high = (pdes >> 32) & 0x000FFFFF;
        }
        /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
        unsigned long base = (((IPTR)i) << 21);

        PDE[i].base_low  = base >> 13;
        PDE[i].base_high = (base >> 32) & 0x000FFFFF;
    }
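    /*
     * Each pass of the loop identity-maps one 2MB page: i = 0 covers
     * physical 0x000000-0x1FFFFF, i = 1 covers 0x200000-0x3FFFFF, and so
     * on, so mmu_PDEPageCount entries cover mmu_PDEPageCount * 2MB of RAM.
     */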
    /* PDP Entries. There are four of them used in order to define 2048 pages of 2MB each. */
    for (i = 0; i < 4; i++)
    {
        struct PDE2M *pdes = &PDE[512 * i];
        /* Set the PDP entry up and point to the PDE table */
        PDP[i].base_low  = (unsigned long)pdes >> 12;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;
        for (j = 0; j < 512; j++)
        {
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            /* Cast before shifting: i << 30 would overflow a 32-bit int for i >= 2 */
            unsigned long base = ((unsigned long)i << 30) + ((unsigned long)j << 21);
            pdes[j].base_low  = base >> 13;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;
        }
    }

    MMU->mmu_PDEPageUsed = 0;
}
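/*
 * Hypothetical usage sketch (the real call sites live in the platform boot
 * code, not in this file): core_SetupMMU() sizes and allocates the tables,
 * core_InitMMU() fills them in, and core_LoadMMU() activates them:
 *
 *     core_SetupMMU(&__KernBootPrivate->MMU, memtop);
 *     core_LoadMMU(&__KernBootPrivate->MMU);
 */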
void core_LoadMMU(struct CPUMMUConfig *MMU)
{
    D(bug("[Kernel] %s: Registering PML4 @ 0x%p\n", __func__, MMU->mmu_PML4));

    /* Loading CR3 also flushes all non-global TLB entries */
    wrcr(cr3, MMU->mmu_PML4);
}
void core_SetupMMU(struct CPUMMUConfig *MMU, IPTR memtop)
{
    /*
     * How many PDE entries shall be created? Default is 2048 (4GB), unless
     * more RAM needs to be mapped.
     */
    MMU->mmu_PDEPageCount = 2048;
    /* Does RAM exceed 4GB? Adjust the number of PDE pages. */
    if (((memtop + (1 << 21) - 1) >> 21) > MMU->mmu_PDEPageCount)
        MMU->mmu_PDEPageCount = (memtop + (1 << 21) - 1) >> 21;

    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first %dMB area\n", MMU->mmu_PDEPageCount << 1));
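    /*
     * Worked example: memtop = 0x140000000 (5GB) rounds up to
     * (0x140000000 + 0x1FFFFF) >> 21 = 2560 2MB pages, so the debug line
     * above reports 5120MB.
     */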
    /*
     * Allocate MMU pages and directories. Four PDE directories (PDE2M structures)
     * are enough to map the whole 4GB address space.
     */
    MMU->mmu_PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
    MMU->mmu_PDP  = krnAllocBootMemAligned(sizeof(struct PDPE)  * 512, PAGE_SIZE);
    MMU->mmu_PDE  = krnAllocBootMemAligned(sizeof(struct PDE2M) * MMU->mmu_PDEPageCount, PAGE_SIZE);
    MMU->mmu_PTE  = krnAllocBootMemAligned(sizeof(struct PTE)   * 512 * 32, PAGE_SIZE);

    D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p PTE 0x%p\n", MMU->mmu_PML4, MMU->mmu_PDP, MMU->mmu_PDE, MMU->mmu_PTE));
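    /*
     * PAGE_SIZE alignment is required because the page-table entries store
     * only bits 12 and up of each table's physical address. The 512 * 32
     * PTE pool allows up to 32 2MB pages to be split into 4K pages later
     * by core_ProtPage().
     */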
    D(bug("[Kernel] core_SetupMMU: Done\n"));
}
void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    struct CPUMMUConfig *MMU;
    struct PML4E *pml4;
    struct PDPE  *pdpe;
    struct PDE4K *pde;
    struct PTE   *pte, *Pages4K;
    int i;
    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off  = (addr >> 21) & 0x1ff;
    unsigned long pte_off  = (addr >> 12) & 0x1ff;
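    /*
     * Example: addr = 0x01025000 yields pml4_off = 0, pdpe_off = 0,
     * pde_off = 8 and pte_off = 37 - entry 8 of the page directory and
     * entry 37 of the (possibly still to be created) 4K page table.
     */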
    DMMU(bug("[Kernel] Marking page 0x%p as read-only\n", addr));
    MMU  = &__KernBootPrivate->MMU;
    pml4 = MMU->mmu_PML4;
    pdpe = (struct PDPE *)((((IPTR)pml4[pml4_off].base_low) << 12) | (((IPTR)pml4[pml4_off].base_high) << 32));
    pde  = (struct PDE4K *)((((IPTR)pdpe[pdpe_off].base_low) << 12) | (((IPTR)pdpe[pdpe_off].base_high) << 32));
    Pages4K = MMU->mmu_PTE;
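    /*
     * The walk above recombines base_low/base_high to recover the physical
     * address of each next-level table; using the result directly as a
     * pointer only works because the boot mappings built by core_InitMMU()
     * are identity mappings (virtual == physical).
     */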
    /* If the directory entry maps a 2MB page, it has to be split first */
    if (pde[pde_off].ps)
    {
        /* Work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[pde_off];
        struct PDE2M *pde2 = (struct PDE2M *)pde;
        intptr_t base = ((IPTR)pde2[pde_off].base_low << 13) | ((IPTR)pde2[pde_off].base_high << 32);
        /* Grab the next unused block of 512 PTEs from the pool allocated in core_SetupMMU() */
        pte = &Pages4K[512 * MMU->mmu_PDEPageUsed++];

        D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
        D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));
        for (i = 0; i < 512; i++)
        {
            /* Copy the attributes of the old 2MB page into each 4K entry */
            pte[i].p   = pde2[pde_off].p;
            pte[i].rw  = pde2[pde_off].rw;
            pte[i].us  = pde2[pde_off].us;
            pte[i].pwt = pde2[pde_off].pwt;
            pte[i].pcd = pde2[pde_off].pcd;
            pte[i].base_low  = base >> 12;
            pte[i].base_high = (base >> 32) & 0x0FFFFF;

            /* Advance to the next 4K frame */
            base += PAGE_SIZE;
        }
        /* Point the PDE at the new page table instead of the 2MB page */
        tmp_pde.ps = 0;
        tmp_pde.base_low  = (intptr_t)pte >> 12;
        tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;

        pde[pde_off] = tmp_pde;
    }
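    /*
     * The PDE is rewritten via the stack copy and stored with a single
     * assignment, so the live entry never holds a half-updated value while
     * the CPU could be walking it.
     */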
    /* The 2MB page (if there was one) is split now; locate the 4K page table */
    pte = (struct PTE *)((((IPTR)pde[pde_off].base_low) << 12) | (((IPTR)pde[pde_off].base_high) << 32));
    pte[pte_off].rw = rw ? 1 : 0;
    pte[pte_off].us = us ? 1 : 0;
    pte[pte_off].p  = p  ? 1 : 0;

    /* Flush the stale TLB entry for this page */
    asm volatile ("invlpg (%0)"::"r"(addr));
}
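/*
 * Note: core_ProtPage() assumes the address is already covered by the boot
 * page tables; it does not allocate new PDP or PDE levels, and the 512 * 32
 * PTE pool set up in core_SetupMMU() bounds how many 2MB pages can be split.
 */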
void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));
    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr += 4096;
        length -= 4096;
    }
}
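/*
 * Hypothetical usage sketch: write-protecting a kernel read-only section.
 * The symbol names are assumptions for illustration, not part of this file:
 *
 *     extern char _rodata_start[], _rodata_end[];
 *
 *     core_ProtKernelArea((intptr_t)_rodata_start,
 *                         _rodata_end - _rodata_start,
 *                         1, 0, 0);   // present, read-only, supervisor-only
 */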