3 #include "kernel_intern.h"
6 The MMU pages and directories. They are stored at a fixed location and may be either reused in the
7 64-bit kernel, or replaced by it. Four PDE directories (PDE2M structures) are enough to map the whole 4GB address space.
/*
 * Statically allocated, 4KB-aligned MMU translation tables used to map the
 * first 4GB with 2MB pages: one PML4, one PDP, and four PDE tables
 * (4 * 512 * 2MB = 4GB). __attribute__((used)) keeps the linker from
 * discarding them even though they are only referenced within this file;
 * aligned(4096) is mandated by the hardware page-table format.
 */
10 static struct PML4E PML4
[512] __attribute__((used
,aligned(4096)));
11 static struct PDPE PDP
[512] __attribute__((used
,aligned(4096)));
12 static struct PDE2M PDE
[4][512] __attribute__((used
,aligned(4096)));
/* One pointer per PDP slot, each naming one 512-entry 2MB-page directory */
17 struct PDE2M
*pdes
[] = { &PDE
[0], &PDE
[1], &PDE
[2], &PDE
[3] };
19 rkprintf("[Kernel] Re-creating the MMU pages for first 4GB area\n");
21 /* PML4 Entry - we need only the first out of 16 entries */
22 PML4
[0].p
= 1; /* present */
23 PML4
[0].rw
= 1; /* read/write */
24 PML4
[0].us
= 1; /* accessible for user */
25 PML4
[0].pwt
= 0; /* write-through cache */
26 PML4
[0].pcd
= 0; /* cache enabled */
27 PML4
[0].a
= 0; /* not yet accessed */
28 PML4
[0].mbz
= 0; /* must be zero */
/*
 * Page-frame number of the PDP table (physical address >> 12).
 * NOTE(review): the (unsigned int) cast truncates the pointer to 32 bits --
 * safe only while this bootstrap code and its tables live below 4GB; confirm.
 */
29 PML4
[0].base_low
= (unsigned int)PDP
>> 12;
33 PML4
[0].base_high
= 0;
36 PDP Entries. There are four of them used in order to define 2048 pages of 2MB each.
42 /* Set the PDP entry up and point to the PDE table */
/* Page-frame number of the i-th PDE table (same 32-bit truncation as above) */
50 PDP
[i
].base_low
= (unsigned int)pdes
[i
] >> 12;
/* Fill all 512 entries of the i-th PDE table */
56 for (j
=0; j
< 512; j
++)
58 /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
60 struct PDE2M
*PDE
= pdes
[i
];
/*
 * Table i, entry j covers the 2MB region starting at physical address
 * (i << 30) + (j << 21); PDE2M.base_low stores that address shifted by 13.
 */
71 PDE
[j
].base_low
= ((i
<< 30) + (j
<< 21)) >> 13;
80 rkprintf("[Kernel] PML4 @ %012p\n", &PML4
);
/*
 * Pool of 32 preallocated 4KB-aligned PTE tables, consumed sequentially
 * (via the used_page counter) when core_ProtPage() has to split a 2MB
 * page into 512 4K pages.
 */
83 static struct PTE Pages4K
[32][512] __attribute__((used
,aligned(4096)));
/*
 * core_ProtPage() -- set the protection bits (p = present, rw = writable,
 * us = user-accessible; nonzero means set) of the single 4K page containing
 * 'addr' in the live page tables. If the address is currently covered by a
 * 2MB page, that page is first split into 512 4K PTEs. Flushes the TLB
 * entry for 'addr' with invlpg when done.
 */
86 void core_ProtPage(intptr_t addr
, char p
, char rw
, char us
)
/* Walk the live tables starting from CR3 (the current PML4) */
88 struct PML4E
*pml4
= rdcr(cr3
);
/*
 * base_low holds a page-frame number; << 12 rebuilds the next-level table's
 * physical address. NOTE(review): this relies on an implicit int-to-pointer
 * conversion and on the tables being identity-mapped -- confirm both hold.
 */
89 struct PDPE
*pdpe
= pml4
[(addr
>> 39) & 0x1ff].base_low
<< 12;
90 struct PDE4K
*pde
= pdpe
[(addr
>> 30) & 0x1ff].base_low
<< 12;
92 rkprintf("[Kernel] Marking page %012p as read-only\n",addr
);
/* ps set means this entry maps a 2MB page that must be split first */
94 if (pde
[(addr
>> 21) & 0x1ff].ps
)
/*
 * Take the next preallocated 4K PTE table from the pool.
 * NOTE(review): used_page is never bounds-checked against the 32 tables
 * available in Pages4K -- confirm at most 32 splits can ever occur.
 */
96 struct PTE
*pte
= Pages4K
[used_page
++];
/* Same directory entry, viewed with the 2MB-page field layout */
97 struct PDE2M
*pde2
= (struct PDE2M
*)pde
;
99 /* work on local copy of the affected PDE */
100 struct PDE4K tmp_pde
= pde
[(addr
>> 21) & 0x1ff];
/* Physical base of the 2MB region being split (base_low is addr >> 13) */
102 intptr_t base
= pde2
[(addr
>> 21) & 0x1ff].base_low
<< 13;
105 rkprintf("[Kernel] The page for address %012p was a big one. Splitting it into 4K pages\n",
107 rkprintf("[Kernel] Base=%012p, pte=%012p\n", base
, pte
);
/* Copy the 2MB entry's attributes into each of the 512 new 4K PTEs */
109 for (i
= 0; i
< 512; i
++)
112 pte
[i
].rw
= pde2
[(addr
>> 21) & 0x1ff].rw
;
113 pte
[i
].us
= pde2
[(addr
>> 21) & 0x1ff].us
;
114 pte
[i
].pwt
= pde2
[(addr
>> 21) & 0x1ff].pwt
;
115 pte
[i
].pcd
= pde2
[(addr
>> 21) & 0x1ff].pcd
;
116 pte
[i
].base_low
= base
>> 12;
/* Point the local copy of the directory entry at the new PTE table... */
121 tmp_pde
.base_low
= ((intptr_t)pte
) >> 12;
/* ...and publish it to the live page directory in a single store */
123 pde
[(addr
>> 21) & 0x1ff] = tmp_pde
;
/* A 4K PTE table now exists for this address: apply the requested bits */
126 struct PTE
*pte
= pde
[(addr
>> 21) & 0x1ff].base_low
<< 12;
127 pte
[(addr
>> 12) & 0x1ff].rw
= rw
? 1:0;
128 pte
[(addr
>> 12) & 0x1ff].us
= us
? 1:0;
129 pte
[(addr
>> 12) & 0x1ff].p
= p
? 1:0;
/* Invalidate the stale TLB entry for this page */
130 asm volatile ("invlpg (%0)"::"r"(addr
));
/*
 * core_ProtKernelArea() -- apply core_ProtPage() to every 4K page of the
 * range [addr, addr + length), setting the present / read-write /
 * user-accessible bits as requested.
 */
133 void core_ProtKernelArea(intptr_t addr
, intptr_t length
, char p
, char rw
, char us
)
135 rkprintf("[Kernel] Protecting area %012p-%012p\n", addr
, addr
+ length
- 1);
/* One call per 4K page in the range (loop header/stepping not visible here) */
139 core_ProtPage(addr
, p
, rw
, us
);