/*
 * planlOS — system/kernel/mm/virt.c
 * Virtual memory management (x86 paging).
 * blob 62d9878e85ac9086afc8d4e6934d801163693981 — "Fixed memory management."
 */
/*
Copyright (C) 2008 Mathias Gottschlag

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
22 #include "mm/memory.h"
23 #include "ke/errors.h"
24 #include "ke/debug.h"
25 #include <string.h>
27 uint32_t kernel_directory[1024] __attribute__ ((aligned (4096)));
29 #define MM_KERNEL_PAGE_TABLES 0xF0000000
31 static int mmInitMemoryRange(uintptr_t phys, uintptr_t virt, uint32_t size, uint32_t mode)
33 if (phys + size > 0x400000) return KE_ERROR_INVALIDADDR;
34 uint32_t i;
35 for (i = 0; i < (size + 4095) / 0x1000; ++i)
37 uintptr_t target = virt + i * 0x1000;
38 uintptr_t src = phys + i * 0x1000;
39 // Write directly onto the page table using the physical address
40 uint32_t *page_table = (uint32_t*)(kernel_directory[target >> 22] & ~0xFFF);
41 // TODO: Access mode
42 page_table[(target / 0x1000) % 1024] = src | 0x3;
44 return 0;
47 int mmInitVirtualMemory(MultibootInfo *mbinfo, uintptr_t kernel_begin,
48 uintptr_t kernel_end)
50 // Zero the page directory
51 int i;
52 for (i = 0; i < 1024; i++)
54 kernel_directory[i] = 0;
57 // Map the directory onto itself
58 kernel_directory[MM_KERNEL_PAGE_TABLES >> 22] = ((uintptr_t)kernel_directory - MM_KERNEL_BASE) | 0x3;
60 // Initialize page tables
61 // TODO: We waste way too much memory here
62 uintptr_t page_table_phys = mmAllocPhysicalMemory(MM_MEMORY_ALLOC_DMA, 0, 0x1000);
63 memset((void*)(page_table_phys + MM_KERNEL_BASE), 0, 0x1000);
64 kernel_directory[0] = page_table_phys | 0x3;
65 for (i = 768; i < 1024; i++)
67 if (kernel_directory[i] == 0)
69 page_table_phys = mmAllocPhysicalMemory(MM_MEMORY_ALLOC_DMA, 0, 0x1000);
70 memset((void*)(page_table_phys + MM_KERNEL_BASE), 0, 0x1000);
71 kernel_directory[i] = page_table_phys | 0x3;
74 kernel_directory[MM_KERNEL_PAGE_TABLES >> 22] = ((uintptr_t)kernel_directory - MM_KERNEL_BASE) | 0x3;
76 // Map kernel image
77 // TODO: Don't allow patching kernel image
78 uint32_t error = mmInitMemoryRange(kernel_begin - MM_KERNEL_BASE, kernel_begin,
79 (kernel_end - kernel_begin + 4095) & ~0xFFF,
80 MM_MAP_READ | MM_MAP_WRITE | MM_MAP_EXECUTE);
81 if (error) return error;
82 error = mmInitMemoryRange(0xB8000, 0xC00B8000,
83 0x4000,
84 MM_MAP_READ | MM_MAP_WRITE);
85 if (error) return error;
87 // Enable paging
88 asm volatile("mov %%eax, %%cr3" :: "a" ((uintptr_t)kernel_directory - MM_KERNEL_BASE));
89 return 0;
92 int mmCreateAddressSpace(MmAddressSpace *addrspace)
94 // Allocate memory
95 addrspace->phys_addr = mmAllocPhysicalMemory(0, 0, 0x1000);
96 if (!addrspace->phys_addr) return KE_ERROR_OOM;
97 // Map page directory into kernel space
98 uintptr_t kernel_addr = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
99 if (!kernel_addr) return KE_ERROR_KERNEL_OOM;
100 mmMapKernelMemory(addrspace->phys_addr, kernel_addr, MM_MAP_READ | MM_MAP_WRITE);
101 addrspace->virt_addr = (uint32_t*)kernel_addr;
102 memset(addrspace->virt_addr, 0, 768 * 4);
103 memcpy(addrspace->virt_addr + 768, kernel_directory + 768, 256 * 4);
104 return 0;
106 static int mmClonePageTable(uintptr_t *dest, uintptr_t *src)
108 uint32_t i;
109 for (i = 0; i < 1024; i++)
111 uintptr_t page_phys = src[i] & ~0xFFF;
112 uintptr_t page_new_phys = 0;
113 uintptr_t page = 0;
114 uintptr_t page_new = 0;
115 if (page_phys)
117 // Map page
118 page = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
119 if (!page) return KE_ERROR_KERNEL_OOM;
120 mmMapKernelMemory(page_phys, page, MM_MAP_READ | MM_MAP_WRITE);
121 // Create new page
122 page_new_phys = mmAllocPhysicalMemory(0, 0, 0x1000);
123 if (!page_new_phys) return KE_ERROR_OOM;
124 dest[i] = page_new_phys | (src[i] & 0xFFF);
125 // Map new page
126 page_new = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
127 if (!page_new) return KE_ERROR_KERNEL_OOM;
128 mmMapKernelMemory(page_new_phys, page_new, MM_MAP_READ | MM_MAP_WRITE);
129 // Copy page
130 memcpy((void*)page_new, (void*)page, 0x1000);
131 // Unmap pages
132 mmMapKernelMemory(0, page_new, 0);
133 mmMapKernelMemory(0, page, 0);
136 return 0;
138 int mmCloneAddressSpace(MmAddressSpace *dest, MmAddressSpace *src)
140 uint32_t i;
141 for (i = 0; i < 768; i++)
143 uintptr_t page_table_phys = src->virt_addr[i] & ~0xFFF;
144 uintptr_t page_table_new_phys = 0;
145 uintptr_t page_table = 0;
146 uintptr_t page_table_new = 0;
147 if (page_table_phys)
149 // Map page table
150 page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
151 if (!page_table) return KE_ERROR_KERNEL_OOM;
152 mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
153 // Create new page table
154 page_table_new_phys = mmAllocPhysicalMemory(0, 0, 0x1000);
155 if (!page_table_new_phys) return KE_ERROR_OOM;
156 dest->virt_addr[i] = page_table_new_phys | 0x7;
157 // Map new page table
158 page_table_new = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
159 if (!page_table_new) return KE_ERROR_KERNEL_OOM;
160 mmMapKernelMemory(page_table_new_phys, page_table_new, MM_MAP_READ | MM_MAP_WRITE);
161 // Clone page table
162 memset((void*)page_table_new, 0, 0x1000);
163 int status = mmClonePageTable((uintptr_t*)page_table_new, (uintptr_t*)page_table);
164 // Unmap tables
165 mmMapKernelMemory(0, page_table_new, 0);
166 mmMapKernelMemory(0, page_table, 0);
167 if (status) return status;
170 return 0;
172 int mmDestroyAddressSpace(MmAddressSpace *addrspace)
174 mmClearAddressSpace(addrspace);
175 mmMapKernelMemory(0, (uintptr_t)addrspace->virt_addr, 0);
176 mmFreePhysicalMemory(addrspace->phys_addr, 0x1000);
177 return 0;
179 static void mmClearPageTable(uintptr_t *page_table)
181 uint32_t i;
182 for (i = 0; i < 1024; i++)
184 uintptr_t page_phys = page_table[i] & ~0xFFF;
185 if (page_phys)
186 mmFreePhysicalMemory(page_phys, 0x1000);
189 int mmClearAddressSpace(MmAddressSpace *addrspace)
191 // Get addr which we can use to map the page table
192 uintptr_t page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
193 if (!page_table) return KE_ERROR_KERNEL_OOM;
194 // Loop through page tables
195 uint32_t i;
196 for (i = 0; i < 768; i++)
198 uintptr_t page_table_phys = addrspace->virt_addr[i] & ~0xFFF;
199 if (page_table_phys)
201 // Map page table
202 mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
203 // Clear page table
204 mmClearPageTable((uintptr_t*)page_table);
205 // Unmap page table
206 mmMapKernelMemory(0, page_table, 0);
207 mmFreePhysicalMemory(page_table_phys, 0x1000);
210 memset(addrspace->virt_addr, 0, 768 * sizeof(uintptr_t));
211 return 0;
214 int mmMapMemory(MmAddressSpace *addrspace, uintptr_t phys, uintptr_t virt,
215 uint32_t mode)
217 virt &= ~0xFFF;
218 phys &= ~0xFFF;
219 // Get page table
220 uintptr_t page_table_phys = addrspace->virt_addr[virt >> 22];
221 uintptr_t page_table = 0;
222 if (!page_table_phys)
224 // No page table present, create one
225 page_table_phys = mmAllocPhysicalMemory(0, 0, 0x1000);
226 if (!page_table_phys) return KE_ERROR_OOM;
227 addrspace->virt_addr[virt >> 22] = page_table_phys | 0x7;
228 page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
229 if (!page_table) return KE_ERROR_KERNEL_OOM;
230 mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
231 memset((void*)page_table, 0, 0x1000);
233 else
235 // Map page table
236 page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
237 if (!page_table) return KE_ERROR_KERNEL_OOM;
238 mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
240 uint32_t table_index = (virt / 0x1000) % 1024;
241 if (((uintptr_t*)page_table)[table_index])
243 kePrint("Warning: %x already mapped!\n", virt);
245 // Set flags
246 phys |= 0x5; // Present | User
247 if (mode & MM_MAP_WRITE) phys |= 0x2;
248 if (mode & MM_MAP_UNCACHEABLE) phys |= 0x18;
249 // Fill table entry
250 ((uintptr_t*)page_table)[table_index] = phys;
252 // Unmap page table
253 mmMapKernelMemory(0, page_table, 0);
254 keSyncVirtMemory();
255 return KE_ERROR_NOTIMPLEMENTED;
257 int mmMapKernelMemory(uintptr_t phys, uintptr_t virt,
258 uint32_t mode)
260 virt &= ~0xFFF;
261 phys &= ~0xFFF;
262 uint32_t pgdir_index = virt >> 22;
263 uint32_t pgtab_index = (virt / 0x1000) % 1024;
264 uint32_t *page_tables = (uint32_t*)MM_KERNEL_PAGE_TABLES;
265 phys |= 0x1; // Present
266 if (mode & MM_MAP_WRITE) phys |= 0x2;
267 if (mode & MM_MAP_UNCACHEABLE) phys |= 0x18;
268 if (mode)
269 *(page_tables + pgdir_index * 1024 + pgtab_index) = phys;
270 else
271 *(page_tables + pgdir_index * 1024 + pgtab_index) = 0;
272 asm volatile("invlpg %0" :: "m" (*(char*)virt));
273 keSyncVirtMemory();
274 return 0;
276 uintptr_t mmGetPhysAddress(MmAddressSpace *addrspace, uintptr_t virt)
278 uint32_t pgdir_index = virt >> 22;
279 uint32_t pgtab_index = (virt / 0x1000) % 1024;
280 // Get page table
281 uintptr_t page_table_phys = addrspace->virt_addr[pgdir_index];
282 if (!page_table_phys) return 0;
283 // Map page table
284 uintptr_t page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
285 if (!page_table) return 0;
286 mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
287 uint32_t entry = ((uintptr_t*)page_table)[pgtab_index];
288 // Unmap page table
289 mmMapKernelMemory(0, page_table, 0);
290 return entry & ~0xFFF;
292 uintptr_t mmKernelGetPhysAddress(uintptr_t virt)
294 if (virt < MM_KERNEL_BASE) return 0;
295 uint32_t pgdir_index = virt >> 22;
296 uint32_t pgtab_index = (virt / 0x1000) % 1024;
297 uint32_t *page_tables = (uint32_t*)MM_KERNEL_PAGE_TABLES;
298 return *(page_tables + pgdir_index * 1024 + pgtab_index) & ~0xFFF;
300 static int mmPageIsFree(MmAddressSpace *addrspace, uintptr_t addr)
302 uint32_t pgdir_index = addr >> 22;
303 uint32_t pgtab_index = (addr / 0x1000) % 1024;
304 // Get page table
305 uintptr_t page_table_phys = addrspace->virt_addr[pgdir_index];
306 if (!page_table_phys) return 1;
307 // Map page table
308 uintptr_t page_table = mmFindFreeKernelPages(MM_MAX_KERNEL_PAGE, MM_MIN_KERNEL_PAGE, 1, 0x1000);
309 if (!page_table) return KE_ERROR_KERNEL_OOM;
310 mmMapKernelMemory(page_table_phys, page_table, MM_MAP_READ | MM_MAP_WRITE);
311 uint32_t entry = ((uintptr_t*)page_table)[pgtab_index];
312 // Unmap page table
313 mmMapKernelMemory(0, page_table, 0);
314 return entry == 0;
316 uintptr_t mmFindFreePages(MmAddressSpace *addrspace, uintptr_t max, uintptr_t min,
317 int downwards, uint32_t size)
319 // TODO: This is horribly inefficient
320 // Check arguments
321 if (max < min) return KE_ERROR_INVALID_ARG;
322 if (max == min) return 0;
323 max &= ~0xFFF;
324 min &= ~0xFFF;
325 uint32_t page;
326 uint32_t pagecount = 0;
328 // Search for free pages
329 if (downwards)
331 for (page = max; page >= min; page -= 0x1000)
333 if (mmPageIsFree(addrspace, page))
335 pagecount++;
336 if (pagecount == size) return page;
338 else
340 pagecount = 0;
342 if (page == MM_MIN_USER_PAGE) break;
345 else
347 for (page = min; page <= max; page += 0x1000)
349 if (mmPageIsFree(addrspace, page))
351 pagecount++;
352 if (pagecount == size) return page - pagecount * 0x1000 + 0x1000;
354 else
356 pagecount = 0;
358 if (page == MM_MAX_USER_PAGE) break;
361 return 0;
363 int mmKernelPageIsFree(uintptr_t addr)
365 uint32_t pgdir_index = addr >> 22;
366 uint32_t pgtab_index = (addr / 0x1000) % 1024;
367 uint32_t *page_tables = (uint32_t*)MM_KERNEL_PAGE_TABLES;
368 return *(page_tables + pgdir_index * 1024 + pgtab_index) == 0;
370 uintptr_t mmFindFreeKernelPages(uintptr_t max, uintptr_t min,
371 int downwards, uint32_t size)
373 size = (size + 0xFFF) / 0x1000;
374 // Check arguments
375 if (max < min) return KE_ERROR_INVALID_ARG;
376 if (max == min) return 0;
377 max &= ~0xFFF;
378 min &= ~0xFFF;
379 uint32_t page;
380 uint32_t pagecount = 0;
382 // Search for free pages
383 if (downwards)
385 for (page = max; page >= min; page -= 0x1000)
387 if (mmKernelPageIsFree(page))
389 pagecount++;
390 if (pagecount == size + 1) return page;
392 else
394 pagecount = 0;
396 if (page == MM_MIN_KERNEL_PAGE) break;
399 else
401 for (page = min; page <= max; page += 0x1000)
403 if (mmKernelPageIsFree(page))
405 pagecount++;
406 if (pagecount == size) return page - pagecount * 0x1000 + 0x1000;
408 else
410 pagecount = 0;
412 if (page == MM_MAX_KERNEL_PAGE) break;
415 return 0;
418 void keSyncVirtMemory(void)
420 // TODO