/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <asm/cpu.h>
#include <exec/types.h>
#include "kernel_base.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
#include "kernel_intern.h"
#include "apic.h"

#define D(x) x
#define DMMU(x) x

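/* Debug output is compiled in here; define D(x)/DMMU(x) as empty to silence it. */
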
void core_SetupMMU(struct KernBootPrivate *__KernBootPrivate, IPTR memtop)
{
    unsigned int i;
    struct PML4E *PML4;
    struct PDPE *PDP;
    struct PDE2M *PDE;

    /*
     * How many PDE entries shall be created? Default is 2048 (4GB), unless more RAM
     * is available...
     */
    int pde_page_count = 2048;

    /* Does RAM exceed 4GB? Adjust the number of PDE entries. */
    if (((memtop + (1 << 21) - 1) >> 21) > pde_page_count)
        pde_page_count = (memtop + (1 << 21) - 1) >> 21;
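    /*
     * Each PDE entry maps one 2MB page (1 << 21 bytes), so rounding memtop up
     * to the next 2MB boundary and shifting right by 21 gives the entry count:
     * e.g. memtop of 9GB (0x240000000) yields pde_page_count = 4608.
     */
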
    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first %dMB area\n", pde_page_count << 1));

    if (!__KernBootPrivate->PML4)
    {
        /*
         * Allocate MMU pages and directories. Four PDE directories (PDE2M structures)
         * are enough to map the whole 4GB address space; more are allocated when RAM
         * exceeds 4GB.
         */
        __KernBootPrivate->PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
        __KernBootPrivate->PDP  = krnAllocBootMemAligned(sizeof(struct PDPE) * 512, PAGE_SIZE);
        __KernBootPrivate->PDE  = krnAllocBootMemAligned(sizeof(struct PDE2M) * pde_page_count, PAGE_SIZE);
        __KernBootPrivate->PTE  = krnAllocBootMemAligned(sizeof(struct PTE) * 512 * 32, PAGE_SIZE);

        D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p PTE 0x%p\n", __KernBootPrivate->PML4, __KernBootPrivate->PDP, __KernBootPrivate->PDE, __KernBootPrivate->PTE));
    }
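    /*
     * Note: the PTE pool above holds 32 page tables of 512 entries each, which
     * bounds how many 2MB pages core_ProtPage() can later split into 4K pages.
     */
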
    PML4 = __KernBootPrivate->PML4;
    PDP  = __KernBootPrivate->PDP;
    PDE  = __KernBootPrivate->PDE;

    /* PML4 Entry - we need only the first out of 512 entries */
    PML4[0].p   = 1;    /* present */
    PML4[0].rw  = 1;    /* read/write */
    PML4[0].us  = 1;    /* accessible for user */
    PML4[0].pwt = 0;    /* write-through cache */
    PML4[0].pcd = 0;    /* cache enabled */
    PML4[0].a   = 0;    /* not yet accessed */
    PML4[0].mbz = 0;    /* must be zero */
    PML4[0].base_low = (unsigned long)PDP >> 12;
    PML4[0].avl = 0;
    PML4[0].nx  = 0;
    PML4[0].avail = 0;
    PML4[0].base_high = ((unsigned long)PDP >> 32) & 0x000FFFFF;
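    /*
     * One PML4 entry covers 512GB of virtual address space, so entry 0 alone
     * is enough for the identity mapping built below.
     */
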
    for (i = 0; i < pde_page_count; i++)
    {
        /* For every 512th page create the directory entry */
        if ((i % 512) == 0)
        {
            IPTR pdes = (IPTR)&PDE[i];
            int idx = i / 512;

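            /* Each PDP entry maps 1GB (512 x 2MB), hence one new entry per 512 PDEs. */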
            /* Set the PDP entry up and point to the PDE table */
            PDP[idx].p   = 1;
            PDP[idx].rw  = 1;
            PDP[idx].us  = 1;
            PDP[idx].pwt = 0;
            PDP[idx].pcd = 0;
            PDP[idx].a   = 0;
            PDP[idx].mbz = 0;
            PDP[idx].base_low = (unsigned long)pdes >> 12;
            PDP[idx].nx  = 0;
            PDP[idx].avail = 0;
            PDP[idx].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;
        }

        /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
        unsigned long base = (((IPTR)i) << 21);

        PDE[i].p   = 1;
        PDE[i].rw  = 1;
        PDE[i].us  = 1;
        PDE[i].pwt = 0;     // 1
        PDE[i].pcd = 0;     // 1
        PDE[i].a   = 0;
        PDE[i].d   = 0;
        PDE[i].g   = 0;
        PDE[i].pat = 0;
        PDE[i].ps  = 1;
        PDE[i].base_low  = base >> 13;
        PDE[i].avail = 0;
        PDE[i].nx  = 0;
        PDE[i].base_high = (base >> 32) & 0x000FFFFF;
    }
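    /*
     * The loop above identity-maps the first pde_page_count * 2MB of the
     * address space. Storing base >> 13 (not >> 12) in base_low suggests the
     * field starts at bit 13 of the entry, with bit 12 reserved for the PAT
     * flag of 2MB pages; since base is 2MB-aligned, the low bits stay zero.
     */
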
#if 0
    /* PDP Entries. There are four of them used in order to define 2048 pages of 2MB each. */
    for (i = 0; i < 4; i++)
    {
        struct PDE2M *pdes = &PDE[512 * i];
        unsigned int j;

        /* Set the PDP entry up and point to the PDE table */
        PDP[i].p   = 1;
        PDP[i].rw  = 1;
        PDP[i].us  = 1;
        PDP[i].pwt = 0;
        PDP[i].pcd = 0;
        PDP[i].a   = 0;
        PDP[i].mbz = 0;
        PDP[i].base_low = (unsigned long)pdes >> 12;
        PDP[i].nx  = 0;
        PDP[i].avail = 0;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;

        for (j = 0; j < 512; j++)
        {
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            unsigned long base = (i << 30) + (j << 21);

            pdes[j].p   = 1;
            pdes[j].rw  = 1;
            pdes[j].us  = 1;
            pdes[j].pwt = 0;    // 1
            pdes[j].pcd = 0;    // 1
            pdes[j].a   = 0;
            pdes[j].d   = 0;
            pdes[j].g   = 0;
            pdes[j].pat = 0;
            pdes[j].ps  = 1;
            pdes[j].base_low  = base >> 13;
            pdes[j].avail = 0;
            pdes[j].nx  = 0;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;
        }
    }
#endif

    __KernBootPrivate->used_page = 0;

    D(bug("[Kernel] core_SetupMMU: Registering New PML4 @ 0x%p\n", __KernBootPrivate->PML4));
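    /* Loading CR3 with the new PML4 activates the mapping and flushes all non-global TLB entries. */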
    wrcr(cr3, __KernBootPrivate->PML4);

    D(bug("[Kernel] core_SetupMMU: Done\n"));
}

void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off  = (addr >> 21) & 0x1ff;
    unsigned long pte_off  = (addr >> 12) & 0x1ff;
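    /*
     * x86-64 4-level paging splits a virtual address into 9-bit table indices:
     * bits 39-47 select the PML4 entry, 30-38 the PDP entry, 21-29 the PDE and
     * 12-20 the PTE; the low 12 bits are the offset inside a 4K page.
     */
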
    struct PML4E *pml4 = __KernBootPrivate->PML4;
    struct PDPE *pdpe = (struct PDPE *)((pml4[pml4_off].base_low << 12) | ((unsigned long)pml4[pml4_off].base_high << 32));
    struct PDE4K *pde = (struct PDE4K *)((pdpe[pdpe_off].base_low << 12) | ((unsigned long)pdpe[pdpe_off].base_high << 32));
    struct PTE *Pages4K = __KernBootPrivate->PTE;
    struct PTE *pte;

    DMMU(bug("[Kernel] Changing protection of page 0x%p\n", addr));

    if (pde[pde_off].ps)
    {
        /* Work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[pde_off];
        struct PDE2M *pde2 = (struct PDE2M *)pde;
        intptr_t base = (pde2[pde_off].base_low << 13) | ((unsigned long)pde2[pde_off].base_high << 32);
        int i;

        pte = &Pages4K[512 * __KernBootPrivate->used_page++];

        D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
        D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));

        for (i = 0; i < 512; i++)
        {
            pte[i].p   = 1;
            pte[i].rw  = pde2[pde_off].rw;
            pte[i].us  = pde2[pde_off].us;
            pte[i].pwt = pde2[pde_off].pwt;
            pte[i].pcd = pde2[pde_off].pcd;
            pte[i].base_low  = base >> 12;
            pte[i].base_high = (base >> 32) & 0x0FFFFF;

            base += PAGE_SIZE;
        }
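        /*
         * Retarget the PDE at the freshly filled page table. The change is
         * assembled in tmp_pde and stored back in one assignment, so the live
         * entry never points at a half-initialized table.
         */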
        tmp_pde.ps = 0;
        tmp_pde.base_low  = (intptr_t)pte >> 12;
        tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;

        pde[pde_off] = tmp_pde;
    }

    pte = (struct PTE *)((pde[pde_off].base_low << 12) | ((unsigned long)pde[pde_off].base_high << 32));

    pte[pte_off].rw = rw ? 1 : 0;
    pte[pte_off].us = us ? 1 : 0;
    pte[pte_off].p  = p ? 1 : 0;
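    /* Evict the stale TLB entry so the new protection bits take effect immediately. */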
    asm volatile ("invlpg (%0)"::"r"(addr));
}

void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr += 4096;
        length -= 4096;
    }
}
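
/*
 * Usage sketch (hypothetical address): write-protect a two-page region while
 * keeping it present, with user access disabled:
 *
 *     core_ProtKernelArea(0x100000, 2 * 4096, 1, 0, 0);
 *
 * Each 4K page in the range is split out of its 2MB mapping (if necessary)
 * and updated individually by core_ProtPage().
 */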