// src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
11 #include "paging/32bit/X86PagingMethod32Bit.h"
13 #include <stdlib.h>
14 #include <string.h>
16 #include <AutoDeleter.h>
18 #include <arch/smp.h>
19 #include <arch_system_info.h>
20 #include <boot/kernel_args.h>
21 #include <int.h>
22 #include <thread.h>
23 #include <vm/vm.h>
24 #include <vm/VMAddressSpace.h>
26 #include "paging/32bit/X86PagingStructures32Bit.h"
27 #include "paging/32bit/X86VMTranslationMap32Bit.h"
28 #include "paging/x86_physical_page_mapper.h"
29 #include "paging/x86_physical_page_mapper_large_memory.h"
//#define TRACE_X86_PAGING_METHOD_32_BIT
#ifdef TRACE_X86_PAGING_METHOD_32_BIT
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif

#define MAX_INITIAL_POOLS \
    (ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, 1024) / 1024)
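    // Each pool is backed by a single page table and therefore provides 1024
    // mapping slots. This reserves enough statically allocated pools to cover
    // the worst-case slot demand (SMP_MAX_CPUS CPUs plus the extra slots),
    // rounded up to whole pools.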


using X86LargePhysicalPageMapper::PhysicalPageSlot;


// #pragma mark - X86PagingMethod32Bit::PhysicalPageSlotPool


struct X86PagingMethod32Bit::PhysicalPageSlotPool
    : X86LargePhysicalPageMapper::PhysicalPageSlotPool {
public:
    virtual             ~PhysicalPageSlotPool();

            status_t    InitInitial(kernel_args* args);
            status_t    InitInitialPostArea(kernel_args* args);

            void        Init(area_id dataArea, void* data,
                            area_id virtualArea, addr_t virtualBase);

    virtual status_t    AllocatePool(
                            X86LargePhysicalPageMapper
                                ::PhysicalPageSlotPool*& _pool);
    virtual void        Map(phys_addr_t physicalAddress,
                            addr_t virtualAddress);

public:
    static  PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];

private:
    area_id             fDataArea;
    area_id             fVirtualArea;
    addr_t              fVirtualBase;
    page_table_entry*   fPageTable;
};


X86PagingMethod32Bit::PhysicalPageSlotPool
    X86PagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool[
        MAX_INITIAL_POOLS];


X86PagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}


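/*! Sets up the pool before the VM is fully initialized: both the virtual
    address range and the memory for the page table and slot array come from
    early (boot-time) allocations, which is why Init() is called with -1,
    i.e. no area IDs, below.
*/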
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
    // allocate a virtual address range for the pages to be mapped into
    addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
        kPageTableAlignment);
    if (virtualBase == 0) {
        panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
            "physical page pool space in virtual address space!");
        return B_ERROR;
    }

    // allocate memory for the page table and data
    size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
    page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
        areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
    if (pageTable == 0) {
        panic("X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(): "
            "Failed to allocate memory for page table!");
        return B_ERROR;
    }

    // prepare the page table
    _EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);

    // init the pool structure and add the initial pool
    Init(-1, pageTable, -1, (addr_t)virtualBase);

    return B_OK;
}


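/*! Second init step for the initial pools, run once the area system works:
    creates proper areas for the early allocations made in InitInitial(), so
    that the VM knows about and tracks that memory.
*/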
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
    kernel_args* args)
{
    // create an area for the (already allocated) data
    size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
    void* temp = fPageTable;
    area_id area = create_area("physical page pool", &temp,
        B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    if (area < B_OK) {
        panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
            "create area for physical page pool.");
        return area;
    }
    fDataArea = area;

    // create an area for the virtual address space
    temp = (void*)fVirtualBase;
    area = vm_create_null_area(VMAddressSpace::KernelID(),
        "physical page pool space", &temp, B_EXACT_ADDRESS,
        1024 * B_PAGE_SIZE, 0);
    if (area < B_OK) {
        panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
            "create area for physical page pool space.");
        return area;
    }
    fVirtualArea = area;

    return B_OK;
}


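/*! Initializes the pool object from already allocated backing memory: the
    first page of \a data holds the pool's page table, immediately followed
    by the 1024 PhysicalPageSlot structures, which are chained into a free
    list here.
*/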
void
X86PagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
    area_id virtualArea, addr_t virtualBase)
{
    fDataArea = dataArea;
    fVirtualArea = virtualArea;
    fVirtualBase = virtualBase;
    fPageTable = (page_table_entry*)data;

    // init slot list
    fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
    addr_t slotAddress = virtualBase;
    for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
        PhysicalPageSlot* slot = &fSlots[i];
        slot->next = slot + 1;
        slot->pool = this;
        slot->address = slotAddress;
    }

    fSlots[1023].next = NULL;
        // terminate list
}


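/*! Maps \a physicalAddress at the slot address \a virtualAddress by writing
    the corresponding page table entry directly. Note that only the local TLB
    entry is invalidated; judging by the per-CPU slot accounting
    (TOTAL_SLOTS_PER_CPU), a slot is only ever used on one CPU at a time, so
    no cross-CPU shootdown should be needed here.
*/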
void
X86PagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
    addr_t virtualAddress)
{
    page_table_entry& pte = fPageTable[
        (virtualAddress - fVirtualBase) / B_PAGE_SIZE];
    pte = (physicalAddress & X86_PTE_ADDRESS_MASK)
        | X86_PTE_WRITABLE | X86_PTE_GLOBAL | X86_PTE_PRESENT;

    invalidate_TLB(virtualAddress);
}


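/*! Creates a completely new pool at run time, when the kernel VM is fully
    operational and the physical page mapper needs more slots.
*/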
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
    X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
    // create the pool structure
    PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
    if (pool == NULL)
        return B_NO_MEMORY;
    ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

    // create an area that can contain the page table and the slot
    // structures
    size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
    void* data;
    virtual_address_restrictions virtualRestrictions = {};
    virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
    physical_address_restrictions physicalRestrictions = {};
    area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
        PAGE_ALIGN(areaSize), B_FULL_LOCK,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
        &virtualRestrictions, &physicalRestrictions, &data);
    if (dataArea < 0)
        return dataArea;

    // create the null area for the virtual address space
    void* virtualBase;
    area_id virtualArea = vm_create_null_area(
        VMAddressSpace::KernelID(), "physical page pool space",
        &virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
        CREATE_AREA_PRIORITY_VIP);
    if (virtualArea < 0) {
        delete_area(dataArea);
        return virtualArea;
    }

    // prepare the page table
    memset(data, 0, B_PAGE_SIZE);

    // get the page table's physical address
    phys_addr_t physicalTable;
    X86VMTranslationMap32Bit* map = static_cast<X86VMTranslationMap32Bit*>(
        VMAddressSpace::Kernel()->TranslationMap());
    uint32 dummyFlags;
    cpu_status state = disable_interrupts();
    map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
    restore_interrupts(state);

    // put the page table into the page directory
    int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
    page_directory_entry* entry
        = &map->PagingStructures32Bit()->pgdir_virt[index];
    PutPageTableInPageDir(entry, physicalTable,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    X86PagingStructures32Bit::UpdateAllPageDirs(index, *entry);
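        // The kernel portion of the page directory is mirrored into the
        // page directory of every address space, so the new page table has
        // to be propagated to all of them, not just the current one.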

    // init the pool structure
    pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
    poolDeleter.Detach();
    _pool = pool;
    return B_OK;
}


// #pragma mark - X86PagingMethod32Bit


X86PagingMethod32Bit::X86PagingMethod32Bit()
    :
    fPageHole(NULL),
    fPageHolePageDir(NULL),
    fKernelPhysicalPageDirectory(0),
    fKernelVirtualPageDirectory(NULL),
    fPhysicalPageMapper(NULL),
    fKernelPhysicalPageMapper(NULL)
{
}


X86PagingMethod32Bit::~X86PagingMethod32Bit()
{
}


status_t
X86PagingMethod32Bit::Init(kernel_args* args,
    VMPhysicalPageMapper** _physicalPageMapper)
{
    TRACE("X86PagingMethod32Bit::Init(): entry\n");

    // page hole set up in stage2
    fPageHole = (page_table_entry*)(addr_t)args->arch_args.page_hole;
    // calculate where the pgdir would be
    fPageHolePageDir = (page_directory_entry*)
        (((addr_t)args->arch_args.page_hole)
            + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
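        // The page hole works by pointing one page directory entry back at
        // the page directory itself (see MapEarly()), so the directory
        // appears as the last page of the hole's 4 MB range, i.e. at
        // page_hole + 4 MB - 4 KB.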
    // clear out the bottom 2 GB, unmap everything
    memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
        sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);

    fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
    fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
        args->arch_args.vir_pgdir;

#ifdef TRACE_X86_PAGING_METHOD_32_BIT
    TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
    TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
        fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif

    X86PagingStructures32Bit::StaticInit();

    // create the initial pools for the physical page mapper
    int32 poolCount = _GetInitialPoolCount();
    PhysicalPageSlotPool* pool = PhysicalPageSlotPool::sInitialPhysicalPagePool;

    for (int32 i = 0; i < poolCount; i++) {
        new(&pool[i]) PhysicalPageSlotPool;
        status_t error = pool[i].InitInitial(args);
        if (error != B_OK) {
            panic("X86PagingMethod32Bit::Init(): Failed to create initial "
                "pool for physical page mapper!");
            return error;
        }
    }

    // create physical page mapper
    large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
        fPhysicalPageMapper, fKernelPhysicalPageMapper);
        // TODO: Select the best page mapper!

    // enable global page feature if available
    if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
        // this prevents kernel pages from being flushed from TLB on
        // context-switch
        x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
    }

    TRACE("X86PagingMethod32Bit::Init(): done\n");

    *_physicalPageMapper = fPhysicalPageMapper;
    return B_OK;
}


status_t
X86PagingMethod32Bit::InitPostArea(kernel_args* args)
{
    // Now that the VM is initialized, create proper areas for the paging
    // structures that were set up earlier.
    void *temp;
    area_id area;

    // unmap the page hole hack we were using before
    fKernelVirtualPageDirectory[1023] = 0;
    fPageHolePageDir = NULL;
    fPageHole = NULL;

    temp = (void*)fKernelVirtualPageDirectory;
    area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
        B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    if (area < B_OK)
        return area;

    int32 poolCount = _GetInitialPoolCount();
    for (int32 i = 0; i < poolCount; i++) {
        status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
            .InitInitialPostArea(args);
        if (error != B_OK)
            return error;
    }

    return B_OK;
}


status_t
X86PagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
    X86VMTranslationMap32Bit* map = new(std::nothrow) X86VMTranslationMap32Bit;
    if (map == NULL)
        return B_NO_MEMORY;

    status_t error = map->Init(kernel);
    if (error != B_OK) {
        delete map;
        return error;
    }

    *_map = map;
    return B_OK;
}


status_t
X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
    phys_addr_t physicalAddress, uint8 attributes,
    page_num_t (*get_free_page)(kernel_args*))
{
    // XXX horrible back door to map a page quickly regardless of translation
    // map object, etc. Used only during VM setup.
    // It uses the 'page hole' set up in the stage 2 bootloader: the page hole
    // is created by pointing one of the pgdir entries back at itself,
    // effectively mapping the contents of all of the 4 MB of page tables into
    // a 4 MB region. It's only used here, and is unmapped later.
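    // With the page hole in place, the page table entry for a virtual
    // address is simply fPageHole[virtualAddress / B_PAGE_SIZE], and the
    // page directory itself is visible as the hole's last page.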

    // check to see if a page table exists for this range
    int index = VADDR_TO_PDENT(virtualAddress);
    if ((fPageHolePageDir[index] & X86_PDE_PRESENT) == 0) {
        phys_addr_t pgtable;
        page_directory_entry *e;
        // we need to allocate a pgtable
        pgtable = get_free_page(args);
        // pgtable is in pages, convert to physical address
        pgtable *= B_PAGE_SIZE;

        TRACE("X86PagingMethod32Bit::MapEarly(): asked for free page for "
            "pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);

        // put it in the pgdir
        e = &fPageHolePageDir[index];
        PutPageTableInPageDir(e, pgtable, attributes);

        // zero it out in its new mapping
        memset((unsigned int*)((addr_t)fPageHole
                + (virtualAddress / B_PAGE_SIZE / 1024) * B_PAGE_SIZE),
            0, B_PAGE_SIZE);
    }

    ASSERT_PRINT(
        (fPageHole[virtualAddress / B_PAGE_SIZE] & X86_PTE_PRESENT) == 0,
        "virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
        ", existing pte: %#" B_PRIx32, virtualAddress, fPageHolePageDir[index],
        fPageHole[virtualAddress / B_PAGE_SIZE]);

    // now, fill in the page table entry
    PutPageTableEntryInTable(fPageHole + virtualAddress / B_PAGE_SIZE,
        physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

    return B_OK;
}


bool
X86PagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
    uint32 protection)
{
    // We only trust the kernel team's page directory. So switch to it first.
    // Always set it to make sure the TLBs don't contain obsolete data.
    uint32 physicalPageDirectory = x86_read_cr3();
    x86_write_cr3(fKernelPhysicalPageDirectory);

    // get the page directory entry for the address
    page_directory_entry pageDirectoryEntry;
    uint32 index = VADDR_TO_PDENT(virtualAddress);

    if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
        pageDirectoryEntry = fKernelVirtualPageDirectory[index];
    } else if (fPhysicalPageMapper != NULL) {
        // map the original page directory and get the entry
        void* handle;
        addr_t virtualPageDirectory;
        status_t error = fPhysicalPageMapper->GetPageDebug(
            physicalPageDirectory, &virtualPageDirectory, &handle);
        if (error == B_OK) {
            pageDirectoryEntry
                = ((page_directory_entry*)virtualPageDirectory)[index];
            fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, handle);
        } else
            pageDirectoryEntry = 0;
    } else
        pageDirectoryEntry = 0;

    // map the page table and get the entry
    page_table_entry pageTableEntry;
    index = VADDR_TO_PTENT(virtualAddress);

    if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0
            && fPhysicalPageMapper != NULL) {
        void* handle;
        addr_t virtualPageTable;
        status_t error = fPhysicalPageMapper->GetPageDebug(
            pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
            &handle);
        if (error == B_OK) {
            pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
            fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
        } else
            pageTableEntry = 0;
    } else
        pageTableEntry = 0;

    // switch back to the original page directory
    if (physicalPageDirectory != fKernelPhysicalPageDirectory)
        x86_write_cr3(physicalPageDirectory);

    if ((pageTableEntry & X86_PTE_PRESENT) == 0)
        return false;

    // present means kernel-readable, so check for writable
    return (protection & B_KERNEL_WRITE_AREA) == 0
        || (pageTableEntry & X86_PTE_WRITABLE) != 0;
}


/*static*/ void
X86PagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
    phys_addr_t pgtablePhysical, uint32 attributes)
{
    *entry = (pgtablePhysical & X86_PDE_ADDRESS_MASK)
        | X86_PDE_PRESENT
        | X86_PDE_WRITABLE
        | X86_PDE_USER;
        // TODO: we ignore the attributes of the page table - for
        // compatibility with BeOS we allow having user accessible areas in
        // the kernel address space. This is currently being used by some
        // drivers, mainly for the frame buffer. Our current real time data
        // implementation makes use of this fact, too.
        // We might want to get rid of this possibility one day, especially
        // if we intend to port it to a platform that does not support this.
}


/*static*/ void
X86PagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
    phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
    bool globalPage)
{
    page_table_entry page = (physicalAddress & X86_PTE_ADDRESS_MASK)
        | X86_PTE_PRESENT | (globalPage ? X86_PTE_GLOBAL : 0)
        | MemoryTypeToPageTableEntryFlags(memoryType);

    // if the page is user accessible, it's automatically
    // accessible in kernel space, too (but with the same
    // protection)
    if ((attributes & B_USER_PROTECTION) != 0) {
        page |= X86_PTE_USER;
        if ((attributes & B_WRITE_AREA) != 0)
            page |= X86_PTE_WRITABLE;
    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
        page |= X86_PTE_WRITABLE;

    // put it in the page table
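    // The entry is assembled completely above and then written with a single
    // 32-bit store, so the MMU never sees a partially updated entry.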
    *(volatile page_table_entry*)entry = page;
}


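/*! Returns how many of the initial pools are actually needed: the same
    computation as MAX_INITIAL_POOLS above, but with the real CPU count
    instead of SMP_MAX_CPUS.
*/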
inline int32
X86PagingMethod32Bit::_GetInitialPoolCount()
{
    int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
        + EXTRA_SLOTS;
    return (requiredSlots + 1023) / 1024;
}


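/*! Zeroes the given page tables and enters them into the kernel page
    directory (via the page hole), so that the virtual range starting at
    \a address is backed by wired, permanently mapped page tables.
*/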
/*static*/ void
X86PagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
    addr_t address, size_t size)
{
    memset(pageTables, 0, B_PAGE_SIZE * (size / (B_PAGE_SIZE * 1024)));

    // put the array of pgtables directly into the kernel pagedir
    // these will be wired and kept mapped into virtual space to be easy to
    // get to
    {
        addr_t virtualTable = (addr_t)pageTables;

        page_directory_entry* pageHolePageDir
            = X86PagingMethod32Bit::Method()->PageHolePageDir();

        for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
                i++, virtualTable += B_PAGE_SIZE) {
            phys_addr_t physicalTable = 0;
            _EarlyQuery(virtualTable, &physicalTable);
            page_directory_entry* entry = &pageHolePageDir[
                (address / (B_PAGE_SIZE * 1024)) + i];
            PutPageTableInPageDir(entry, physicalTable,
                B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
        }
    }
}


//! TODO: currently assumes this translation map is active
/*static*/ status_t
X86PagingMethod32Bit::_EarlyQuery(addr_t virtualAddress,
    phys_addr_t *_physicalAddress)
{
    X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
    int index = VADDR_TO_PDENT(virtualAddress);
    if ((method->PageHolePageDir()[index] & X86_PDE_PRESENT) == 0) {
        // no pagetable here
        return B_ERROR;
    }

    page_table_entry* entry = method->PageHole() + virtualAddress / B_PAGE_SIZE;
    if ((*entry & X86_PTE_PRESENT) == 0) {
        // page mapping not valid
        return B_ERROR;
    }

    *_physicalAddress = *entry & X86_PTE_ADDRESS_MASK;
    return B_OK;
}