/*
Copyright (C) 2008 Mathias Gottschlag

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mm/memory.h"
#include "ke/errors.h"

uint32_t kernel_directory[1024] __attribute__ ((aligned (4096)));

#define MM_KERNEL_PAGE_TABLES 0xF0000000
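
/*
 * Note on the layout: the page directory is mapped onto itself at
 * MM_KERNEL_PAGE_TABLES (directory slot 0xF0000000 >> 22 = 960). Through this
 * recursive mapping every page table becomes visible as ordinary memory at
 * MM_KERNEL_PAGE_TABLES + pgdir_index * 0x1000, which is what
 * mmMapKernelMemory and mmKernelGetPhysAddress below rely on when they
 * dereference page_tables + pgdir_index * 1024 + pgtab_index.
 */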

/*
 * Maps a physical range onto a virtual range during early boot. Entries are
 * written through the page tables' physical addresses, which must still be
 * directly addressable at this point (e.g. via the boot loader's mapping of
 * low memory). The physical range is limited to the first 4 MiB.
 */
static int mmInitMemoryRange(uintptr_t phys, uintptr_t virt, uint32_t size, uint32_t mode)
{
	if (phys + size > 0x400000) return KE_ERROR_INVALIDADDR;
	uint32_t i;
	for (i = 0; i < (size + 4095) / 0x1000; ++i)
	{
		uintptr_t target = virt + i * 0x1000;
		uintptr_t src = phys + i * 0x1000;
		// Write directly onto the page table using the physical address
		uint32_t *page_table = (uint32_t*)(kernel_directory[target >> 22] & ~0xFFF);
		page_table[(target / 0x1000) % 1024] = src | 0x3; // Present | Writable
	}
	return 0;
}
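
/*
 * Example (illustrative values only): mmInitMemoryRange(0x100000, 0xC0100000,
 * 0x2000, mode) loops twice. For i = 0, target = 0xC0100000 selects directory
 * slot 0xC0100000 >> 22 = 768 and table slot (0xC0100000 / 0x1000) % 1024 =
 * 256, so the entry 0x100000 | 0x3 is written to slot 256 of the page table
 * referenced by directory slot 768; i = 1 repeats this one page further on.
 */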

int mmInitVirtualMemory(MultibootInfo *mbinfo, uintptr_t kernel_begin,
	uintptr_t kernel_end)
{
	// Zero the page directory
	uint32_t i;
	for (i = 0; i < 1024; i++)
	{
		kernel_directory[i] = 0;
	}

	// Map the directory onto itself
	kernel_directory[MM_KERNEL_PAGE_TABLES >> 22] = ((uintptr_t)kernel_directory - MM_KERNEL_BASE) | 0x3;

	// Initialize page tables
	// TODO: We waste way too much memory here
	uintptr_t page_table_phys = mmAllocPhysicalMemory(MM_MEMORY_ALLOC_DMA, 0, 0x1000);
	memset((void*)(page_table_phys + MM_KERNEL_BASE), 0, 0x1000);
	kernel_directory[0] = page_table_phys | 0x3;
	for (i = 768; i < 1024; i++)
	{
		if (kernel_directory[i] == 0)
		{
			page_table_phys = mmAllocPhysicalMemory(MM_MEMORY_ALLOC_DMA, 0, 0x1000);
			memset((void*)(page_table_phys + MM_KERNEL_BASE), 0, 0x1000);
			kernel_directory[i] = page_table_phys | 0x3;
		}
	}
	// Make sure the directory self-mapping is still in place
	kernel_directory[MM_KERNEL_PAGE_TABLES >> 22] = ((uintptr_t)kernel_directory - MM_KERNEL_BASE) | 0x3;

	// Map the kernel image
	// TODO: Don't allow patching kernel image
	uint32_t error = mmInitMemoryRange(kernel_begin - MM_KERNEL_BASE, kernel_begin,
		(kernel_end - kernel_begin + 4095) & ~0xFFF,
		MM_MAP_READ | MM_MAP_WRITE | MM_MAP_EXECUTE);
	if (error) return error;
	// Map the VGA text buffer (one page; the exact size argument was lost, 0x1000 is assumed)
	error = mmInitMemoryRange(0xB8000, 0xC00B8000, 0x1000,
		MM_MAP_READ | MM_MAP_WRITE);
	if (error) return error;

	// Activate the new page directory
	asm volatile("mov %%eax, %%cr3" :: "a" ((uintptr_t)kernel_directory - MM_KERNEL_BASE));
	return 0;
}

int mmCreateAddressSpace(MmAddressSpace *addrspace)
{
	// Allocate a physical page for the new page directory
	addrspace->phys_addr = mmAllocPhysicalMemory(0, 0, 0x1000);
	if (!addrspace->phys_addr) return KE_ERROR_OOM;
	// Map page directory into kernel space
	uintptr_t kernel_addr = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
	if (!kernel_addr) return KE_ERROR_KERNEL_OOM;
	mmMapKernelMemory(addrspace->phys_addr, kernel_addr, MM_MAP_READ | MM_MAP_WRITE);
	addrspace->virt_addr = (uint32_t*)kernel_addr;
	// Clear the user half, share the kernel half with the global directory
	memset(addrspace->virt_addr, 0, 768 * 4);
	memcpy(addrspace->virt_addr + 768, kernel_directory + 768, 256 * 4);
	return 0;
}
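
/*
 * All address spaces share the kernel half of the directory: entries 768-1023
 * (the region above MM_KERNEL_BASE at 3 GiB) are copied from kernel_directory.
 * Since mmInitVirtualMemory pre-allocated a page table for every one of those
 * slots, later kernel mappings only modify the shared tables and are
 * automatically visible in every address space without further copying.
 */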

static int mmClonePageTable(uintptr_t *dest, uintptr_t *src)
{
	uint32_t i;
	for (i = 0; i < 1024; i++)
	{
		uintptr_t page_phys = src[i] & ~0xFFF;
		uintptr_t page_new_phys = 0;
		uintptr_t page = 0;
		uintptr_t page_new = 0;
		if (src[i] & 0x1) // entry present
		{
			// Map the source page into kernel space
			page = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
			if (!page) return KE_ERROR_KERNEL_OOM;
			mmMapKernelMemory(page_phys, page, MM_MAP_READ | MM_MAP_WRITE);
			// Allocate the new page, keeping the source entry's flags
			page_new_phys = mmAllocPhysicalMemory(0, 0, 0x1000);
			if (!page_new_phys) return KE_ERROR_OOM;
			dest[i] = page_new_phys | (src[i] & 0xFFF);
			// Map the new page into kernel space
			page_new = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
			if (!page_new) return KE_ERROR_KERNEL_OOM;
			mmMapKernelMemory(page_new_phys, page_new, MM_MAP_READ | MM_MAP_WRITE);
			// Copy the page contents
			memcpy((void*)page_new, (void*)page, 0x1000);
			// Unmap both temporary mappings again
			mmMapKernelMemory(0, page_new, 0);
			mmMapKernelMemory(0, page, 0);
		}
	}
	return 0;
}
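
/*
 * The clone is a full deep copy: every present page is duplicated eagerly,
 * and the source entry's low flag bits are carried over via (src[i] & 0xFFF).
 * There is no copy-on-write here, so cloning a large address space allocates
 * as much physical memory as the source currently uses.
 */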

int mmCloneAddressSpace(MmAddressSpace *dest, MmAddressSpace *src)
{
	uint32_t i;
	// Only the user half (entries 0-767) is cloned, the kernel half is shared
	for (i = 0; i < 768; i++)
	{
		uintptr_t page_table_phys = src->virt_addr[i] & ~0xFFF;
		uintptr_t page_table_new_phys = 0;
		uintptr_t page_table = 0;
		uintptr_t page_table_new = 0;
		if (src->virt_addr[i] & 0x1) // page table present
		{
			// Map the source page table
			page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
			if (!page_table) return KE_ERROR_KERNEL_OOM;
			mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
			// Create new page table
			page_table_new_phys = mmAllocPhysicalMemory(0, 0, 0x1000);
			if (!page_table_new_phys) return KE_ERROR_OOM;
			dest->virt_addr[i] = page_table_new_phys | 0x7; // Present | Writable | User
			// Map new page table
			page_table_new = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
			if (!page_table_new) return KE_ERROR_KERNEL_OOM;
			mmMapKernelMemory(page_table_new_phys, page_table_new, MM_MAP_READ | MM_MAP_WRITE);
			// Clone all pages of the table
			memset((void*)page_table_new, 0, 0x1000);
			int status = mmClonePageTable((uintptr_t*)page_table_new, (uintptr_t*)page_table);
			// Unmap both page tables again
			mmMapKernelMemory(0, page_table_new, 0);
			mmMapKernelMemory(0, page_table, 0);
			if (status) return status;
		}
	}
	return 0;
}

int mmDestroyAddressSpace(MmAddressSpace *addrspace)
{
	mmClearAddressSpace(addrspace);
	// Unmap the directory from kernel space and free its physical page
	mmMapKernelMemory(0, (uintptr_t)addrspace->virt_addr, 0);
	mmFreePhysicalMemory(addrspace->phys_addr, 0x1000);
	return 0;
}

static void mmClearPageTable(uintptr_t *page_table)
{
	uint32_t i;
	for (i = 0; i < 1024; i++)
	{
		uintptr_t page_phys = page_table[i] & ~0xFFF;
		if (page_table[i] & 0x1) // entry present
			mmFreePhysicalMemory(page_phys, 0x1000);
	}
}

int mmClearAddressSpace(MmAddressSpace *addrspace)
{
	// Get addr which we can use to map the page table
	uintptr_t page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
	if (!page_table) return KE_ERROR_KERNEL_OOM;
	// Loop through page tables
	uint32_t i;
	for (i = 0; i < 768; i++)
	{
		uintptr_t page_table_phys = addrspace->virt_addr[i] & ~0xFFF;
		if (addrspace->virt_addr[i] & 0x1) // page table present
		{
			// Map the page table and free all pages in it
			mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
			mmClearPageTable((uintptr_t*)page_table);
			// Unmap it again and free the table itself
			mmMapKernelMemory(0, page_table, 0);
			mmFreePhysicalMemory(page_table_phys, 0x1000);
		}
	}
	memset(addrspace->virt_addr, 0, 768 * sizeof(uintptr_t));
	return 0;
}

int mmMapMemory(MmAddressSpace *addrspace, uintptr_t phys, uintptr_t virt,
	uint32_t mode)
{
	if (mode)
	{
		// Get the page table, masking off the directory entry's flag bits
		uintptr_t page_table_phys = addrspace->virt_addr[virt >> 22] & ~0xFFF;
		uintptr_t page_table = 0;
		if (!page_table_phys)
		{
			// No page table present, create one
			page_table_phys = mmAllocPhysicalMemory(0, 0, 0x1000);
			if (!page_table_phys) return KE_ERROR_OOM;
			addrspace->virt_addr[virt >> 22] = page_table_phys | 0x7;
			page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
			if (!page_table) return KE_ERROR_KERNEL_OOM;
			mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
			memset((void*)page_table, 0, 0x1000);
		}
		else
		{
			// Map the existing page table into kernel space
			page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
			if (!page_table) return KE_ERROR_KERNEL_OOM;
			mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
		}
		uint32_t table_index = (virt / 0x1000) % 1024;
		if (((uintptr_t*)page_table)[table_index])
		{
			kePrint("Warning: %x already mapped!\n", virt);
		}
		// Compose the page table entry
		phys |= 0x5; // Present | User
		if (mode & MM_MAP_WRITE) phys |= 0x2;
		if (mode & MM_MAP_UNCACHEABLE) phys |= 0x18; // PWT | PCD
		((uintptr_t*)page_table)[table_index] = phys;
		// Unmap the page table again
		mmMapKernelMemory(0, page_table, 0);
		return 0;
	}
	// Unmapping (mode == 0) is not implemented yet
	return KE_ERROR_NOTIMPLEMENTED;
}

int mmMapKernelMemory(uintptr_t phys, uintptr_t virt,
	uint32_t mode)
{
	// The kernel page tables are reachable through the directory self-mapping
	uint32_t pgdir_index = virt >> 22;
	uint32_t pgtab_index = (virt / 0x1000) % 1024;
	uint32_t *page_tables = (uint32_t*)MM_KERNEL_PAGE_TABLES;
	phys |= 0x1; // Present
	if (mode & MM_MAP_WRITE) phys |= 0x2;
	if (mode & MM_MAP_UNCACHEABLE) phys |= 0x18; // PWT | PCD
	if (mode)
		*(page_tables + pgdir_index * 1024 + pgtab_index) = phys;
	else
		*(page_tables + pgdir_index * 1024 + pgtab_index) = 0;
	// Flush the TLB entry for this address
	asm volatile("invlpg %0" :: "m" (*(char*)virt));
	return 0;
}
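
/*
 * Page table entry flag bits used throughout this file (standard x86):
 *   0x01  Present
 *   0x02  Writable
 *   0x04  User accessible
 *   0x08  PWT (write-through)
 *   0x10  PCD (cache disable)
 * So 0x3 is a present kernel read/write mapping, 0x7 additionally allows user
 * access, and 0x18 = PWT | PCD marks a mapping uncacheable. mmMapKernelMemory
 * never sets the user bit, so kernel mappings stay supervisor-only.
 */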

uintptr_t mmGetPhysAddress(MmAddressSpace *addrspace, uintptr_t virt)
{
	uint32_t pgdir_index = virt >> 22;
	uint32_t pgtab_index = (virt / 0x1000) % 1024;
	// Get the page table's physical address
	uintptr_t page_table_phys = addrspace->virt_addr[pgdir_index] & ~0xFFF;
	if (!page_table_phys) return 0;
	// Map the page table into kernel space
	uintptr_t page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
	if (!page_table) return 0;
	mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
	uint32_t entry = ((uintptr_t*)page_table)[pgtab_index];
	// Unmap it again
	mmMapKernelMemory(0, page_table, 0);
	return entry & ~0xFFF;
}

uintptr_t mmKernelGetPhysAddress(uintptr_t virt)
{
	if (virt < MM_KERNEL_BASE) return 0;
	uint32_t pgdir_index = virt >> 22;
	uint32_t pgtab_index = (virt / 0x1000) % 1024;
	uint32_t *page_tables = (uint32_t*)MM_KERNEL_PAGE_TABLES;
	return *(page_tables + pgdir_index * 1024 + pgtab_index) & ~0xFFF;
}
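
/*
 * Example: for virt = 0xC0001234, pgdir_index = 0xC0001234 >> 22 = 768 and
 * pgtab_index = (0xC0001234 / 0x1000) % 1024 = 1, so the lookup reads entry 1
 * of the page table visible at MM_KERNEL_PAGE_TABLES + 768 * 0x1000 and masks
 * off the flag bits to recover the physical frame address.
 */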

static int mmPageIsFree(MmAddressSpace *addrspace, uintptr_t addr)
{
	uint32_t pgdir_index = addr >> 22;
	uint32_t pgtab_index = (addr / 0x1000) % 1024;
	// No page table means the whole 4 MiB region is unmapped
	uintptr_t page_table_phys = addrspace->virt_addr[pgdir_index] & ~0xFFF;
	if (!page_table_phys) return 1;
	// Map the page table to inspect the entry
	uintptr_t page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
	if (!page_table) return KE_ERROR_KERNEL_OOM;
	mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
	uint32_t entry = ((uintptr_t*)page_table)[pgtab_index];
	// Unmap it again
	mmMapKernelMemory(0, page_table, 0);
	return entry == 0;
}

uintptr_t mmFindFreePages(MmAddressSpace *addrspace, uintptr_t max, uintptr_t min,
	int downwards, uint32_t size)
{
	// TODO: This is horribly inefficient
	size = (size + 0xFFF) / 0x1000; // byte count to page count (assumed, matches mmFindFreeKernelPages)
	if (max < min) return KE_ERROR_INVALID_ARG;
	if (max == min) return 0;

	uintptr_t page;
	uint32_t pagecount = 0;

	// Search for free pages
	if (downwards)
	{
		for (page = max; page >= min; page -= 0x1000)
		{
			if (mmPageIsFree(addrspace, page))
			{
				pagecount++;
				// page is the lowest page of the free run
				if (pagecount == size) return page;
			}
			else
			{
				pagecount = 0;
			}
			// Never scan below the user page range
			if (page == MM_MIN_USER_PAGE) break;
		}
	}
	else
	{
		for (page = min; page <= max; page += 0x1000)
		{
			if (mmPageIsFree(addrspace, page))
			{
				pagecount++;
				// Return the first page of the free run
				if (pagecount == size) return page - pagecount * 0x1000 + 0x1000;
			}
			else
			{
				pagecount = 0;
			}
			// Never scan above the user page range
			if (page == MM_MAX_USER_PAGE) break;
		}
	}
	return 0;
}

int mmKernelPageIsFree(uintptr_t addr)
{
	uint32_t pgdir_index = addr >> 22;
	uint32_t pgtab_index = (addr / 0x1000) % 1024;
	uint32_t *page_tables = (uint32_t*)MM_KERNEL_PAGE_TABLES;
	return *(page_tables + pgdir_index * 1024 + pgtab_index) == 0;
}

uintptr_t mmFindFreeKernelPages(uintptr_t max, uintptr_t min,
	int downwards, uint32_t size)
{
	size = (size + 0xFFF) / 0x1000; // convert the byte count to a page count

	if (max < min) return KE_ERROR_INVALID_ARG;
	if (max == min) return 0;

	uintptr_t page;
	uint32_t pagecount = 0;

	// Search for free pages
	if (downwards)
	{
		for (page = max; page >= min; page -= 0x1000)
		{
			if (mmKernelPageIsFree(page))
			{
				pagecount++;
				// page is the lowest page of the free run (the original
				// compared against size + 1, an apparent off-by-one)
				if (pagecount == size) return page;
			}
			else
			{
				pagecount = 0;
			}
			// Never scan below the kernel page range
			if (page == MM_MIN_KERNEL_PAGE) break;
		}
	}
	else
	{
		for (page = min; page <= max; page += 0x1000)
		{
			if (mmKernelPageIsFree(page))
			{
				pagecount++;
				// Return the first page of the free run
				if (pagecount == size) return page - pagecount * 0x1000 + 0x1000;
			}
			else
			{
				pagecount = 0;
			}
			// Never scan above the kernel page range
			if (page == MM_MAX_KERNEL_PAGE) break;
		}
	}
	return 0;
}
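
/*
 * A sketch of the temporary-mapping pattern used by most functions above
 * (hypothetical usage, not part of this file's API surface):
 *
 *   uintptr_t va = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE,
 *       MM_MIN_KERNEL_PAGE, 1, 0x1000);         // reserve one kernel page
 *   if (!va) return KE_ERROR_KERNEL_OOM;
 *   mmMapKernelMemory(frame, va, MM_MAP_READ | MM_MAP_WRITE);
 *   // ... access the physical frame through va ...
 *   mmMapKernelMemory(0, va, 0);                 // unmap again
 *
 * Nothing marks the page as reserved between the find and the map, so the
 * pattern relies on no concurrent mapping happening between the two calls.
 */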

void keSyncVirtMemory(void)