[haiku.git] src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "paging/64bit/X86PagingMethod64Bit.h"

#include <stdlib.h>
#include <string.h>

#include <boot/kernel_args.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>

#include "paging/64bit/X86PagingStructures64Bit.h"
#include "paging/64bit/X86VMTranslationMap64Bit.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_mapped.h"

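// Define TRACE_X86_PAGING_METHOD_64BIT below to route the TRACE() calls in
// this file to dprintf(); otherwise they compile to no-ops.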
//#define TRACE_X86_PAGING_METHOD_64BIT
#ifdef TRACE_X86_PAGING_METHOD_64BIT
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif

// #pragma mark - X86PagingMethod64Bit

X86PagingMethod64Bit::X86PagingMethod64Bit()
    :
    fKernelPhysicalPML4(0),
    fKernelVirtualPML4(NULL),
    fPhysicalPageMapper(NULL),
    fKernelPhysicalPageMapper(NULL)
{
}


X86PagingMethod64Bit::~X86PagingMethod64Bit()
{
}

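/*! Sets up the paging method during early kernel initialization: takes over
    the kernel PML4 created by the boot loader, enables the NX bit if the CPU
    supports it, clears the user half of the address space and initializes the
    physical page mapper returned via \a _physicalPageMapper.
*/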
status_t
X86PagingMethod64Bit::Init(kernel_args* args,
    VMPhysicalPageMapper** _physicalPageMapper)
{
    fKernelPhysicalPML4 = args->arch_args.phys_pgdir;
    fKernelVirtualPML4 = (uint64*)(addr_t)args->arch_args.vir_pgdir;

    // if available, enable the NX-bit (No eXecute)
    if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD))
        call_all_cpus_sync(&_EnableExecutionDisable, NULL);

    // Ensure that the user half of the address space is clear. This removes
    // the temporary identity mapping made by the boot loader.
    memset(fKernelVirtualPML4, 0, sizeof(uint64) * 256);
    arch_cpu_global_TLB_invalidate();

    // Create the physical page mapper.
    mapped_physical_page_ops_init(args, fPhysicalPageMapper,
        fKernelPhysicalPageMapper);

    *_physicalPageMapper = fPhysicalPageMapper;
    return B_OK;
}

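/*! Creates VM areas for the structures set up earlier: the physical map
    region and the kernel PML4.
*/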
status_t
X86PagingMethod64Bit::InitPostArea(kernel_args* args)
{
    // Create an area covering the physical map area.
    void* address = (void*)KERNEL_PMAP_BASE;
    area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
        "physical map area", &address, B_EXACT_ADDRESS,
        KERNEL_PMAP_SIZE, 0);
    if (area < B_OK)
        return area;

    // Create an area to represent the kernel PML4.
    area = create_area("kernel pml4", (void**)&fKernelVirtualPML4,
        B_EXACT_ADDRESS, B_PAGE_SIZE, B_ALREADY_WIRED,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    if (area < B_OK)
        return area;

    return B_OK;
}

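/*! Creates and initializes an X86VMTranslationMap64Bit for a new address
    space. \a kernel indicates whether it is the kernel address space.
*/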
status_t
X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
    X86VMTranslationMap64Bit* map = new(std::nothrow) X86VMTranslationMap64Bit;
    if (map == NULL)
        return B_NO_MEMORY;

    status_t error = map->Init(kernel);
    if (error != B_OK) {
        delete map;
        return error;
    }

    *_map = map;
    return B_OK;
}

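/*! Maps a single page during early boot, before the VM and the translation
    maps are fully initialized. Missing page directories and page tables are
    allocated through the \a get_free_page callback and zeroed by hand.
*/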
status_t
X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
    phys_addr_t physicalAddress, uint8 attributes,
    page_num_t (*get_free_page)(kernel_args*))
{
    TRACE("X86PagingMethod64Bit::MapEarly(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
        ", %#" B_PRIx8 ")\n", virtualAddress, physicalAddress, attributes);

    // Get the PDPT. We should be mapping on an existing PDPT at this stage.
    uint64* pml4e = &fKernelVirtualPML4[VADDR_TO_PML4E(virtualAddress)];
    ASSERT((*pml4e & X86_64_PML4E_PRESENT) != 0);
    uint64* virtualPDPT = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
        *pml4e & X86_64_PML4E_ADDRESS_MASK);

    // Get the page directory.
    uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
    uint64* virtualPageDir;
    if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
        phys_addr_t physicalPageDir = get_free_page(args) * B_PAGE_SIZE;

        TRACE("X86PagingMethod64Bit::MapEarly(): creating page directory for va"
            " %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
            physicalPageDir);

        SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
            | X86_64_PDPTE_PRESENT
            | X86_64_PDPTE_WRITABLE
            | X86_64_PDPTE_USER);

        // Map it and zero it.
        virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
            physicalPageDir);
        memset(virtualPageDir, 0, B_PAGE_SIZE);
    } else {
        virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
            *pdpte & X86_64_PDPTE_ADDRESS_MASK);
    }

    // Get the page table.
    uint64* pde = &virtualPageDir[VADDR_TO_PDE(virtualAddress)];
    uint64* virtualPageTable;
    if ((*pde & X86_64_PDE_PRESENT) == 0) {
        phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;

        TRACE("X86PagingMethod64Bit::MapEarly(): creating page table for va"
            " %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
            physicalPageTable);

        SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
            | X86_64_PDE_PRESENT
            | X86_64_PDE_WRITABLE
            | X86_64_PDE_USER);

        // Map it and zero it.
        virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
            physicalPageTable);
        memset(virtualPageTable, 0, B_PAGE_SIZE);
    } else {
        virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
            *pde & X86_64_PDE_ADDRESS_MASK);
    }

    // The page table entry must not already be mapped.
    uint64* pte = &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
    ASSERT_PRINT(
        (*pte & X86_64_PTE_PRESENT) == 0,
        "virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
        virtualAddress, *pte);

    // Fill in the table entry.
    PutPageTableEntryInTable(pte, physicalAddress, attributes, 0,
        IS_KERNEL_ADDRESS(virtualAddress));

    return B_OK;
}

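/*! Not implemented for the 64-bit paging method; every kernel page is
    optimistically reported as accessible regardless of \a protection.
*/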
bool
X86PagingMethod64Bit::IsKernelPageAccessible(addr_t virtualAddress,
    uint32 protection)
{
    return true;
}

/*! Traverses down the paging structure hierarchy to find the page directory
    for a virtual address, allocating new tables if required.
*/
/*static*/ uint64*
X86PagingMethod64Bit::PageDirectoryForAddress(uint64* virtualPML4,
    addr_t virtualAddress, bool isKernel, bool allocateTables,
    vm_page_reservation* reservation,
    TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
    // Get the PDPT.
    uint64* pml4e = &virtualPML4[VADDR_TO_PML4E(virtualAddress)];
    if ((*pml4e & X86_64_PML4E_PRESENT) == 0) {
        if (!allocateTables)
            return NULL;

        // Allocate a new PDPT.
        vm_page* page = vm_page_allocate_page(reservation,
            PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

        DEBUG_PAGE_ACCESS_END(page);

        phys_addr_t physicalPDPT
            = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

        TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating PDPT "
            "for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
            physicalPDPT);

        SetTableEntry(pml4e, (physicalPDPT & X86_64_PML4E_ADDRESS_MASK)
            | X86_64_PML4E_PRESENT
            | X86_64_PML4E_WRITABLE
            | X86_64_PML4E_USER);

        mapCount++;
    }

    uint64* virtualPDPT = (uint64*)pageMapper->GetPageTableAt(
        *pml4e & X86_64_PML4E_ADDRESS_MASK);

    // Get the page directory.
    uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
    if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
        if (!allocateTables)
            return NULL;

        // Allocate a new page directory.
        vm_page* page = vm_page_allocate_page(reservation,
            PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

        DEBUG_PAGE_ACCESS_END(page);

        phys_addr_t physicalPageDir
            = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

        TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating page "
            "directory for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
            virtualAddress, physicalPageDir);

        SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
            | X86_64_PDPTE_PRESENT
            | X86_64_PDPTE_WRITABLE
            | X86_64_PDPTE_USER);

        mapCount++;
    }

    return (uint64*)pageMapper->GetPageTableAt(
        *pdpte & X86_64_PDPTE_ADDRESS_MASK);
}

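/*! Returns a pointer to the page directory entry (PDE) for \a virtualAddress,
    allocating intermediate paging structures if \a allocateTables is \c true.
    Returns \c NULL if the entry does not exist and may not be created.
*/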
/*static*/ uint64*
X86PagingMethod64Bit::PageDirectoryEntryForAddress(uint64* virtualPML4,
    addr_t virtualAddress, bool isKernel, bool allocateTables,
    vm_page_reservation* reservation,
    TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
    uint64* virtualPageDirectory = PageDirectoryForAddress(virtualPML4,
        virtualAddress, isKernel, allocateTables, reservation, pageMapper,
        mapCount);
    if (virtualPageDirectory == NULL)
        return NULL;

    return &virtualPageDirectory[VADDR_TO_PDE(virtualAddress)];
}

/*! Traverses down the paging structure hierarchy to find the page table for a
    virtual address, allocating new tables if required.
*/
/*static*/ uint64*
X86PagingMethod64Bit::PageTableForAddress(uint64* virtualPML4,
    addr_t virtualAddress, bool isKernel, bool allocateTables,
    vm_page_reservation* reservation,
    TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
    TRACE("X86PagingMethod64Bit::PageTableForAddress(%#" B_PRIxADDR ", "
        "%d)\n", virtualAddress, allocateTables);

    uint64* pde = PageDirectoryEntryForAddress(virtualPML4, virtualAddress,
        isKernel, allocateTables, reservation, pageMapper, mapCount);
    if (pde == NULL)
        return NULL;

    if ((*pde & X86_64_PDE_PRESENT) == 0) {
        if (!allocateTables)
            return NULL;

        // Allocate a new page table.
        vm_page* page = vm_page_allocate_page(reservation,
            PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

        DEBUG_PAGE_ACCESS_END(page);

        phys_addr_t physicalPageTable
            = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

        TRACE("X86PagingMethod64Bit::PageTableForAddress(): creating page "
            "table for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
            virtualAddress, physicalPageTable);

        SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
            | X86_64_PDE_PRESENT
            | X86_64_PDE_WRITABLE
            | X86_64_PDE_USER);

        mapCount++;
    }

    // No proper large page support at the moment, but they are used for the
    // physical map area. Ensure that nothing tries to treat that as normal
    // address space.
    ASSERT(!(*pde & X86_64_PDE_LARGE_PAGE));

    return (uint64*)pageMapper->GetPageTableAt(*pde & X86_64_PDE_ADDRESS_MASK);
}

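/*! Returns a pointer to the page table entry (PTE) for \a virtualAddress,
    allocating intermediate paging structures if \a allocateTables is \c true.
    Returns \c NULL if the entry does not exist and may not be created.
*/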
/*static*/ uint64*
X86PagingMethod64Bit::PageTableEntryForAddress(uint64* virtualPML4,
    addr_t virtualAddress, bool isKernel, bool allocateTables,
    vm_page_reservation* reservation,
    TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
    uint64* virtualPageTable = PageTableForAddress(virtualPML4, virtualAddress,
        isKernel, allocateTables, reservation, pageMapper, mapCount);
    if (virtualPageTable == NULL)
        return NULL;

    return &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
}

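/*! Builds a page table entry for \a physicalAddress from the given area
    protection \a attributes and \a memoryType, and writes it to \a entry
    via SetTableEntry().
*/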
/*static*/ void
X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry,
    phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
    bool globalPage)
{
    uint64 page = (physicalAddress & X86_64_PTE_ADDRESS_MASK)
        | X86_64_PTE_PRESENT | (globalPage ? X86_64_PTE_GLOBAL : 0)
        | MemoryTypeToPageTableEntryFlags(memoryType);

    // if the page is user accessible, it's automatically
    // accessible in kernel space, too (but with the same
    // protection)
    if ((attributes & B_USER_PROTECTION) != 0) {
        page |= X86_64_PTE_USER;
        if ((attributes & B_WRITE_AREA) != 0)
            page |= X86_64_PTE_WRITABLE;
        if ((attributes & B_EXECUTE_AREA) == 0
            && x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
            page |= X86_64_PTE_NOT_EXECUTABLE;
        }
    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
        page |= X86_64_PTE_WRITABLE;

    // put it in the page table
    SetTableEntry(entry, page);
}

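/*! Called on each CPU (via call_all_cpus_sync()) to set the NX enable bit in
    the EFER MSR, so that the X86_64_PTE_NOT_EXECUTABLE page flag takes effect.
*/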
/*static*/ void
X86PagingMethod64Bit::_EnableExecutionDisable(void* dummy, int cpu)
{
    x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
        | IA32_MSR_EFER_NX);
}