3 #include "kernel_intern.h"
/*
 * The MMU pages and directories. They are stored at fixed location and may be either reused in the
 * 64-bit kernel, or replaced by it. Four PDE directories (PDE2M structures) are enough to map the whole
 * 4GB address space.
 */
/* Level-4 page map (512 entries, page-aligned); only entry 0 is initialised below. */
static struct PML4E PML4[512] __attribute__((used,aligned(4096)));
/* Page-directory-pointer table; entries 0..3 point at the four PDE tables below. */
static struct PDPE PDP[512] __attribute__((used,aligned(4096)));
/* Four directories of 2MB-page entries: 4 * 512 * 2MB covers the first 4GB. */
static struct PDE2M PDE[4][512] __attribute__((used,aligned(4096)));
/* Base of the SMP/APIC trampoline area; the PML4 address is stored at offset 0x14 of it below. */
extern IPTR _Kern_APICTrampolineBase;
19 struct PDE2M
*pdes
[] = { &PDE
[0], &PDE
[1], &PDE
[2], &PDE
[3] };
21 rkprintf("[Kernel] Re-creating the MMU pages for first 4GB area\n");
23 /* PML4 Entry - we need only the first out of 16 entries */
24 PML4
[0].p
= 1; /* present */
25 PML4
[0].rw
= 1; /* read/write */
26 PML4
[0].us
= 1; /* accessible for user */
27 PML4
[0].pwt
= 0; /* write-through cache */
28 PML4
[0].pcd
= 0; /* cache enabled */
29 PML4
[0].a
= 0; /* not yet accessed */
30 PML4
[0].mbz
= 0; /* must be zero */
31 PML4
[0].base_low
= (unsigned int)PDP
>> 12;
35 PML4
[0].base_high
= 0;
38 PDP Entries. There are four of them used in order to define 2048 pages of 2MB each.
44 /* Set the PDP entry up and point to the PDE table */
52 PDP
[i
].base_low
= (unsigned int)pdes
[i
] >> 12;
58 for (j
=0; j
< 512; j
++)
60 /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
62 struct PDE2M
*PDE
= pdes
[i
];
73 PDE
[j
].base_low
= ((i
<< 30) + (j
<< 21)) >> 13;
83 /* HACK! Store the PML4 address in smp trampoline area */
84 *(ULONG
*)(_Kern_APICTrampolineBase
+ 0x0014) = (ULONG
)&PML4
;
86 rkprintf("[Kernel] PML4 @ %012p\n", &PML4
);
/* Pool backing 2MB -> 4K page splits in core_ProtPage: up to 32 page tables
   of 512 4K PTEs each, consumed sequentially via used_page. */
static struct PTE Pages4K[32][512] __attribute__((used,aligned(4096)));
92 void core_ProtPage(intptr_t addr
, char p
, char rw
, char us
)
94 struct PML4E
*pml4
= rdcr(cr3
);
95 struct PDPE
*pdpe
= pml4
[(addr
>> 39) & 0x1ff].base_low
<< 12;
96 struct PDE4K
*pde
= pdpe
[(addr
>> 30) & 0x1ff].base_low
<< 12;
98 rkprintf("[Kernel] Marking page %012p as read-only\n",addr
);
100 if (pde
[(addr
>> 21) & 0x1ff].ps
)
102 struct PTE
*pte
= Pages4K
[used_page
++];
103 struct PDE2M
*pde2
= (struct PDE2M
*)pde
;
105 /* work on local copy of the affected PDE */
106 struct PDE4K tmp_pde
= pde
[(addr
>> 21) & 0x1ff];
108 intptr_t base
= pde2
[(addr
>> 21) & 0x1ff].base_low
<< 13;
111 rkprintf("[Kernel] The page for address %012p was a big one. Splitting it into 4K pages\n",
113 rkprintf("[Kernel] Base=%012p, pte=%012p\n", base
, pte
);
115 for (i
= 0; i
< 512; i
++)
118 pte
[i
].rw
= pde2
[(addr
>> 21) & 0x1ff].rw
;
119 pte
[i
].us
= pde2
[(addr
>> 21) & 0x1ff].us
;
120 pte
[i
].pwt
= pde2
[(addr
>> 21) & 0x1ff].pwt
;
121 pte
[i
].pcd
= pde2
[(addr
>> 21) & 0x1ff].pcd
;
122 pte
[i
].base_low
= base
>> 12;
127 tmp_pde
.base_low
= ((intptr_t)pte
) >> 12;
129 pde
[(addr
>> 21) & 0x1ff] = tmp_pde
;
132 struct PTE
*pte
= pde
[(addr
>> 21) & 0x1ff].base_low
<< 12;
133 pte
[(addr
>> 12) & 0x1ff].rw
= rw
? 1:0;
134 pte
[(addr
>> 12) & 0x1ff].us
= us
? 1:0;
135 pte
[(addr
>> 12) & 0x1ff].p
= p
? 1:0;
136 asm volatile ("invlpg (%0)"::"r"(addr
));
/*
 * core_ProtKernelArea - apply protection bits p/rw/us to every 4K page in
 * the range [addr, addr + length).
 *
 * NOTE(review): the loop wrapper was reconstructed from a lossy listing —
 * presumably the original stepped through the area one 4K page at a time;
 * confirm against the original file. addr is assumed page-aligned.
 */
void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    rkprintf("[Kernel] Protecting area %012p-%012p\n", addr, addr + length - 1);

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr   += 4096;
        length -= 4096;
    }
}