/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "paging/pae/X86PagingMethodPAE.h"

#include <string.h>

#include <new>

#include <AutoDeleter.h>

#include <boot/kernel_args.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>

#include "paging/32bit/paging.h"
#include "paging/32bit/X86PagingMethod32Bit.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/pae/X86VMTranslationMapPAE.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_large_memory.h"


//#define TRACE_X86_PAGING_METHOD_PAE
#ifdef TRACE_X86_PAGING_METHOD_PAE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#if B_HAIKU_PHYSICAL_BITS == 64


#define MAX_INITIAL_POOLS	\
	(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, \
			kPAEPageTableEntryCount) \
		/ kPAEPageTableEntryCount)


using X86LargePhysicalPageMapper::PhysicalPageSlot;


// number of 32 bit pages that will be cached
static const page_num_t kMaxFree32BitPagesCount = 32;


// #pragma mark - ToPAESwitcher
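

// Boot-time helper used by X86PagingMethodPAE::Init(): it rebuilds the
// kernel's 32 bit paging structures in PAE format (PDPT, four page dirs and
// the PAE page tables), then enables PAE -- and NX, where available -- on
// all CPUs. Everything it needs is allocated with the early allocation
// functions, i.e. before the VM is fully initialized.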
struct X86PagingMethodPAE::ToPAESwitcher {
	ToPAESwitcher(kernel_args* args)
		:
		fKernelArgs(args)
	{
		// page hole set up in the boot loader
		fPageHole = (page_table_entry*)
			(addr_t)fKernelArgs->arch_args.page_hole;

		// calculate where the page dir would be
		fPageHolePageDir = (page_directory_entry*)
			(((addr_t)fKernelArgs->arch_args.page_hole)
				+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));

		fPhysicalPageDir = fKernelArgs->arch_args.phys_pgdir;

		TRACE("page hole: %p\n", fPageHole);
		TRACE("page dir:  %p (physical: %#" B_PRIxPHYSADDR ")\n",
			fPageHolePageDir, fPhysicalPageDir);
	}

	void Switch(pae_page_directory_pointer_table_entry*& _virtualPDPT,
		phys_addr_t& _physicalPDPT, void*& _pageStructures,
		size_t& _pageStructuresSize, pae_page_directory_entry** pageDirs,
		phys_addr_t* physicalPageDirs, addr_t& _freeVirtualSlot,
		pae_page_table_entry*& _freeVirtualSlotPTE)
	{
		// count the page tables we have to translate
		uint32 pageTableCount = 0;
		for (uint32 i = FIRST_KERNEL_PGDIR_ENT;
				i < FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS; i++) {
			page_directory_entry entry = fPageHolePageDir[i];
			if ((entry & X86_PDE_PRESENT) != 0)
				pageTableCount++;
		}

		TRACE("page tables to translate: %" B_PRIu32 "\n", pageTableCount);

		// The pages we need to allocate to do our job:
		// + 1 page dir pointer table
		// + 4 page dirs
		// + 2 * page tables (each has 512 instead of 1024 entries)
		// + 1 page for the free virtual slot (no physical page needed)
		uint32 pagesNeeded = 1 + 4 + pageTableCount * 2 + 1;

		// We need additional PAE page tables for the new pages we're going to
		// allocate: Two tables for every 1024 pages to map, i.e. 2 additional
		// pages for every 1022 pages we want to allocate. We also need 32 bit
		// page tables, but we don't need additional virtual space for them,
		// since we can access them via the page hole.
		pagesNeeded += ((pagesNeeded + 1021) / 1022) * 2;

		TRACE("pages needed: %" B_PRIu32 "\n", pagesNeeded);

		// allocate the pages we need
		_AllocateNeededPages(pagesNeeded);

		// prepare the page directory pointer table
		phys_addr_t physicalPDPT = 0;
		pae_page_directory_pointer_table_entry* pdpt
			= (pae_page_directory_pointer_table_entry*)_NextPage(true,
				physicalPDPT);

		for (int32 i = 0; i < 4; i++) {
			fPageDirs[i] = (pae_page_directory_entry*)_NextPage(true,
				fPhysicalPageDirs[i]);

			pdpt[i] = X86_PAE_PDPTE_PRESENT
				| (fPhysicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK);
		}

		// Since we have to enable PAE in two steps -- setting cr3 to the PDPT
		// and setting the cr4 PAE bit -- we copy the kernel page dir entries to
		// the PDPT page, so after setting cr3, we continue to have working
		// kernel mappings. This requires that the PDPTE registers and the
		// page dir entries don't intersect, obviously.
		ASSERT(4 * sizeof(pae_page_directory_pointer_table_entry)
			<= FIRST_KERNEL_PGDIR_ENT * sizeof(page_directory_entry));

		// translate the page tables
		for (uint32 i = FIRST_KERNEL_PGDIR_ENT;
				i < FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS; i++) {
			if ((fPageHolePageDir[i] & X86_PDE_PRESENT) != 0) {
				// two PAE page tables per 32 bit page table
				_TranslatePageTable((addr_t)i * 1024 * B_PAGE_SIZE);
				_TranslatePageTable(((addr_t)i * 1024 + 512) * B_PAGE_SIZE);

				// copy the page directory entry to the PDPT page
				((page_directory_entry*)pdpt)[i] = fPageHolePageDir[i];
			}
		}

		TRACE("free virtual slot: %#" B_PRIxADDR ", PTE: %p\n",
			fFreeVirtualSlot, fFreeVirtualSlotPTE);

		// enable PAE on all CPUs
		call_all_cpus_sync(&_EnablePAE, (void*)(addr_t)physicalPDPT);

		// if available, enable the NX bit (No eXecute)
		if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD))
			call_all_cpus_sync(&_EnableExecutionDisable, NULL);

		// set return values
		_virtualPDPT = pdpt;
		_physicalPDPT = physicalPDPT;
		_pageStructures = fAllocatedPages;
		_pageStructuresSize = (size_t)fUsedPagesCount * B_PAGE_SIZE;
		memcpy(pageDirs, fPageDirs, sizeof(fPageDirs));
		memcpy(physicalPageDirs, fPhysicalPageDirs, sizeof(fPhysicalPageDirs));

		_freeVirtualSlot = fFreeVirtualSlot;
		_freeVirtualSlotPTE = fFreeVirtualSlotPTE;
	}

private:
	static void _EnablePAE(void* physicalPDPT, int cpu)
	{
		x86_write_cr3((addr_t)physicalPDPT);
		x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES);
	}

	static void _EnableExecutionDisable(void* dummy, int cpu)
	{
		x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
			| IA32_MSR_EFER_NX);
	}
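
	// Converts one 32 bit page table's worth of mappings (half of a 4 MB
	// page directory slot) into a freshly allocated PAE page table and hooks
	// that table into the new PAE page dirs. Only entries that are both
	// present and within an allocated virtual range are carried over.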
	void _TranslatePageTable(addr_t virtualBase)
	{
		page_table_entry* entry = &fPageHole[virtualBase / B_PAGE_SIZE];

		// allocate a PAE page table
		phys_addr_t physicalTable = 0;
		pae_page_table_entry* paeTable = (pae_page_table_entry*)_NextPage(false,
			physicalTable);

		// enter it into the page dir
		pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress(
			fPageDirs, virtualBase);
		PutPageTableInPageDir(pageDirEntry, physicalTable,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

		pae_page_table_entry* paeEntry = paeTable;
		for (uint32 i = 0; i < kPAEPageTableEntryCount;
				i++, entry++, paeEntry++) {
			if ((*entry & X86_PTE_PRESENT) != 0
				&& _IsVirtualAddressAllocated(virtualBase + i * B_PAGE_SIZE)) {
				// Note, we use the fact that the PAE flags are defined to the
				// same values.
				*paeEntry = *entry & (X86_PTE_PRESENT
					| X86_PTE_WRITABLE
					| X86_PTE_USER
					| X86_PTE_WRITE_THROUGH
					| X86_PTE_CACHING_DISABLED
					| X86_PTE_GLOBAL
					| X86_PTE_ADDRESS_MASK);
			} else
				*paeEntry = 0;
		}

		if (fFreeVirtualSlot / kPAEPageTableRange
				== virtualBase / kPAEPageTableRange) {
			fFreeVirtualSlotPTE = paeTable
				+ fFreeVirtualSlot / B_PAGE_SIZE % kPAEPageTableEntryCount;
		}
	}
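
	// Reserves a chunk of virtual address space and backs it with 32 bit
	// addressable physical pages, mapping them via the boot loader's page
	// hole. Each page gets its own physical address written into its first
	// bytes so that _NextPage() can hand out (virtual, physical) pairs later
	// without any lookup. The last virtual page of the chunk is deliberately
	// left unmapped and serves as the "free virtual slot".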
	void _AllocateNeededPages(uint32 pagesNeeded)
	{
		size_t virtualSize = ROUNDUP(pagesNeeded, 1024) * B_PAGE_SIZE;
		addr_t virtualBase = vm_allocate_early(fKernelArgs, virtualSize, 0, 0,
			kPageTableAlignment);
		if (virtualBase == 0) {
			panic("Failed to reserve virtual address space for the switch to "
				"PAE!");
		}

		TRACE("virtual space: %#" B_PRIxADDR ", size: %#" B_PRIxSIZE "\n",
			virtualBase, virtualSize);

		// allocate pages for the 32 bit page tables and prepare the tables
		uint32 oldPageTableCount = virtualSize / B_PAGE_SIZE / 1024;
		for (uint32 i = 0; i < oldPageTableCount; i++) {
			// allocate a page
			phys_addr_t physicalTable = _AllocatePage32Bit();

			// put the page into the page dir
			page_directory_entry* entry = &fPageHolePageDir[
				virtualBase / B_PAGE_SIZE / 1024 + i];
			X86PagingMethod32Bit::PutPageTableInPageDir(entry, physicalTable,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

			// clear the page table
			memset((void*)((addr_t)fPageHole
					+ (virtualBase / B_PAGE_SIZE / 1024 + i) * B_PAGE_SIZE),
				0, B_PAGE_SIZE);
		}

		// We don't need a physical page for the free virtual slot.
		pagesNeeded--;

		// allocate and map the pages we need
		for (uint32 i = 0; i < pagesNeeded; i++) {
			// allocate a page
			phys_addr_t physicalAddress = _AllocatePage32Bit();

			// put the page into the page table
			page_table_entry* entry = fPageHole + virtualBase / B_PAGE_SIZE + i;
			X86PagingMethod32Bit::PutPageTableEntryInTable(entry,
				physicalAddress, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
				true);

			// Write the page's physical address into the page itself, so we
			// don't need to look it up later.
			*(phys_addr_t*)(virtualBase + i * B_PAGE_SIZE) = physicalAddress;
		}

		fAllocatedPages = (uint8*)virtualBase;
		fAllocatedPagesCount = pagesNeeded;
		fUsedPagesCount = 0;
		fFreeVirtualSlot
			= (addr_t)(fAllocatedPages + pagesNeeded * B_PAGE_SIZE);
	}

	phys_addr_t _AllocatePage()
	{
		phys_addr_t physicalAddress
			= (phys_addr_t)vm_allocate_early_physical_page(fKernelArgs)
				* B_PAGE_SIZE;
		if (physicalAddress == 0)
			panic("Failed to allocate page for the switch to PAE!");
		return physicalAddress;
	}

	phys_addr_t _AllocatePage32Bit()
	{
		phys_addr_t physicalAddress = _AllocatePage();
		if (physicalAddress > 0xffffffff) {
			panic("Failed to allocate 32 bit addressable page for the switch "
				"to PAE!");
			return 0;
		}
		return physicalAddress;
	}
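
	// Hands out the next page from the chunk prepared by
	// _AllocateNeededPages(). The page's physical address is read back from
	// the page itself (it was stored there during allocation), and the page
	// is optionally cleared before being returned.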
	void* _NextPage(bool clearPage, phys_addr_t& _physicalAddress)
	{
		if (fUsedPagesCount >= fAllocatedPagesCount) {
			panic("X86PagingMethodPAE::ToPAESwitcher::_NextPage(): no more "
				"allocated pages!");
			return NULL;
		}

		void* page = fAllocatedPages + (fUsedPagesCount++) * B_PAGE_SIZE;
		_physicalAddress = *((phys_addr_t*)page);

		if (clearPage)
			memset(page, 0, B_PAGE_SIZE);

		return page;
	}

	bool _IsVirtualAddressAllocated(addr_t address) const
	{
		for (uint32 i = 0; i < fKernelArgs->num_virtual_allocated_ranges; i++) {
			addr_t start = fKernelArgs->virtual_allocated_range[i].start;
			addr_t end = start + fKernelArgs->virtual_allocated_range[i].size;
			if (address < start)
				continue;
			if (address <= end - 1)
				return true;
		}

		return false;
	}

private:
	kernel_args*				fKernelArgs;
	page_table_entry*			fPageHole;
	page_directory_entry*		fPageHolePageDir;
	phys_addr_t					fPhysicalPageDir;
	uint8*						fAllocatedPages;
	uint32						fAllocatedPagesCount;
	uint32						fUsedPagesCount;
	addr_t						fFreeVirtualSlot;
	pae_page_table_entry*		fFreeVirtualSlotPTE;
	pae_page_directory_entry*	fPageDirs[4];
	phys_addr_t					fPhysicalPageDirs[4];
};


// #pragma mark - PhysicalPageSlotPool
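

// A pool of consecutive virtual address "slots", all backed by a single PAE
// page table, that the large-memory physical page mapper uses to temporarily
// map arbitrary physical pages. The first instances live in a static array
// (sInitialPhysicalPagePool), since they have to be set up before the kernel
// heap and area system are available.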
struct X86PagingMethodPAE::PhysicalPageSlotPool
	: X86LargePhysicalPageMapper::PhysicalPageSlotPool {
public:
	virtual						~PhysicalPageSlotPool();

			status_t			InitInitial(X86PagingMethodPAE* method,
									kernel_args* args);
			status_t			InitInitialPostArea(kernel_args* args);

			void				Init(area_id dataArea,
									pae_page_table_entry* pageTable,
									area_id virtualArea, addr_t virtualBase);

	virtual	status_t			AllocatePool(
									X86LargePhysicalPageMapper
										::PhysicalPageSlotPool*& _pool);
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress);

public:
	static	PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];

private:
	area_id						fVirtualArea;
	addr_t						fVirtualBase;
	pae_page_table_entry*		fPageTable;
};


X86PagingMethodPAE::PhysicalPageSlotPool
	X86PagingMethodPAE::PhysicalPageSlotPool::sInitialPhysicalPagePool[
		MAX_INITIAL_POOLS];


X86PagingMethodPAE::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}


status_t
X86PagingMethodPAE::PhysicalPageSlotPool::InitInitial(
	X86PagingMethodPAE* method, kernel_args* args)
{
	// allocate a virtual address range for the pages to be mapped into
	addr_t virtualBase = vm_allocate_early(args, kPAEPageTableRange, 0, 0,
		kPAEPageTableRange);
	if (virtualBase == 0) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
			"physical page pool space in virtual address space!");
		return B_ERROR;
	}

	// allocate memory for the page table and data
	size_t areaSize = B_PAGE_SIZE
		+ sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]);
	pae_page_table_entry* pageTable = (pae_page_table_entry*)vm_allocate_early(
		args, areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
	if (pageTable == 0) {
		panic("X86PagingMethodPAE::PhysicalPageSlotPool::InitInitial(): Failed "
			"to allocate memory for page table!");
		return B_ERROR;
	}

	// clear the page table and put it in the page dir
	memset(pageTable, 0, B_PAGE_SIZE);

	phys_addr_t physicalTable = 0;
	method->_EarlyQuery((addr_t)pageTable, &physicalTable);

	pae_page_directory_entry* entry = PageDirEntryForAddress(
		method->KernelVirtualPageDirs(), virtualBase);
	PutPageTableInPageDir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	// init the pool structure and add the initial pool
	Init(-1, pageTable, -1, (addr_t)virtualBase);

	return B_OK;
}


status_t
X86PagingMethodPAE::PhysicalPageSlotPool::InitInitialPostArea(
	kernel_args* args)
{
	// create an area for the (already allocated) data
	size_t areaSize = B_PAGE_SIZE
		+ sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]);
	void* temp = fPageTable;
	area_id area = create_area("physical page pool", &temp,
		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK) {
		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
			"create area for physical page pool.");
		return area;
	}
	fDataArea = area;

	// create an area for the virtual address space
	temp = (void*)fVirtualBase;
	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical page pool space", &temp, B_EXACT_ADDRESS,
		kPAEPageTableRange, 0);
	if (area < B_OK) {
		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
			"create area for physical page pool space.");
		return area;
	}
	fVirtualArea = area;

	return B_OK;
}


void
X86PagingMethodPAE::PhysicalPageSlotPool::Init(area_id dataArea,
	pae_page_table_entry* pageTable, area_id virtualArea, addr_t virtualBase)
{
	fDataArea = dataArea;
	fVirtualArea = virtualArea;
	fVirtualBase = virtualBase;
	fPageTable = pageTable;

	// init slot list
	fSlots = (PhysicalPageSlot*)(fPageTable + kPAEPageTableEntryCount);
	addr_t slotAddress = virtualBase;
	for (uint32 i = 0; i < kPAEPageTableEntryCount;
			i++, slotAddress += B_PAGE_SIZE) {
		PhysicalPageSlot* slot = &fSlots[i];
		slot->next = slot + 1;
		slot->pool = this;
		slot->address = slotAddress;
	}

	fSlots[kPAEPageTableEntryCount - 1].next = NULL;
		// terminate list
}


void
X86PagingMethodPAE::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
	addr_t virtualAddress)
{
	pae_page_table_entry& pte = fPageTable[
		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
	pte = (physicalAddress & X86_PAE_PTE_ADDRESS_MASK)
		| X86_PAE_PTE_WRITABLE | X86_PAE_PTE_GLOBAL | X86_PAE_PTE_PRESENT;

	invalidate_TLB(virtualAddress);
}


status_t
X86PagingMethodPAE::PhysicalPageSlotPool::AllocatePool(
	X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE
		+ sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, kPAEPageTableRange,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
	phys_addr_t physicalTable;
	X86VMTranslationMapPAE* map = static_cast<X86VMTranslationMapPAE*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

	// put the page table into the page directory
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			map->PagingStructuresPAE()->VirtualPageDirs(), (addr_t)virtualBase);
	PutPageTableInPageDir(pageDirEntry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	// init the pool structure
	pool->Init(dataArea, (pae_page_table_entry*)data, virtualArea,
		(addr_t)virtualBase);
	poolDeleter.Detach();
	_pool = pool;
	return B_OK;
}


// #pragma mark - X86PagingMethodPAE


X86PagingMethodPAE::X86PagingMethodPAE()
	:
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL),
	fFreePages(NULL),
	fFreePagesCount(0)
{
	mutex_init(&fFreePagesLock, "x86 PAE free pages");
}


X86PagingMethodPAE::~X86PagingMethodPAE()
{
}
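

// Switches the CPU(s) from 32 bit to PAE paging via ToPAESwitcher, then sets
// up the initial physical page slot pools and the large-memory physical page
// mapper that the rest of the VM will use.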
status_t
X86PagingMethodPAE::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	// switch to PAE
	ToPAESwitcher(args).Switch(fKernelVirtualPageDirPointerTable,
		fKernelPhysicalPageDirPointerTable, fEarlyPageStructures,
		fEarlyPageStructuresSize, fKernelVirtualPageDirs,
		fKernelPhysicalPageDirs, fFreeVirtualSlot, fFreeVirtualSlotPTE);

	// create the initial pools for the physical page mapper
	int32 poolCount = _GetInitialPoolCount();
	PhysicalPageSlotPool* pool = PhysicalPageSlotPool::sInitialPhysicalPagePool;

	for (int32 i = 0; i < poolCount; i++) {
		new(&pool[i]) PhysicalPageSlotPool;

		status_t error = pool[i].InitInitial(this, args);
		if (error != B_OK) {
			panic("X86PagingMethodPAE::Init(): Failed to create initial pool "
				"for physical page mapper!");
			return error;
		}
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
		fPhysicalPageMapper, fKernelPhysicalPageMapper);

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}


status_t
X86PagingMethodPAE::InitPostArea(kernel_args* args)
{
	// wrap the kernel paging structures in an area
	area_id area = create_area("kernel paging structs", &fEarlyPageStructures,
		B_EXACT_ADDRESS, fEarlyPageStructuresSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	// let the initial page pools create areas for their structures
	int32 poolCount = _GetInitialPoolCount();
	for (int32 i = 0; i < poolCount; i++) {
		status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
			.InitInitialPostArea(args);
		if (error != B_OK)
			return error;
	}

	// The early physical page mapping mechanism is no longer needed. Unmap
	// the slot.
	*fFreeVirtualSlotPTE = 0;
	invalidate_TLB(fFreeVirtualSlot);

	fFreeVirtualSlotPTE = NULL;
	fFreeVirtualSlot = 0;

	return B_OK;
}


status_t
X86PagingMethodPAE::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
	X86VMTranslationMapPAE* map = new(std::nothrow) X86VMTranslationMapPAE;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}
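

// Maps a single page while the kernel is still booting, i.e. before the
// physical page mapper exists. Page tables that have to be touched are made
// accessible through _EarlyGetPageTable(), which (re)maps them at the free
// virtual slot reserved by the ToPAESwitcher.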
status_t
X86PagingMethodPAE::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	page_num_t (*get_free_page)(kernel_args*))
{
	// check to see if a page table exists for this range
	pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress(
		fKernelVirtualPageDirs, virtualAddress);
	pae_page_table_entry* pageTable;
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// we need to allocate a page table
		phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;

		TRACE("X86PagingMethodPAE::MapEarly(): asked for free page for "
			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);

		// put it in the page dir
		PutPageTableInPageDir(pageDirEntry, physicalPageTable, attributes);

		// zero it out in its new mapping
		pageTable = _EarlyGetPageTable(physicalPageTable);
		memset(pageTable, 0, B_PAGE_SIZE);
	} else {
		// table already exists -- map it
		pageTable = _EarlyGetPageTable(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	}

	pae_page_table_entry* entry = pageTable
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;

	ASSERT_PRINT(
		(*entry & X86_PAE_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx64
		", existing pte: %#" B_PRIx64, virtualAddress, *pageDirEntry, *entry);

	// now, fill in the entry
	PutPageTableEntryInTable(entry, physicalAddress, attributes, 0,
		IS_KERNEL_ADDRESS(virtualAddress));

	return B_OK;
}
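

// Checks -- without taking any VM locks -- whether the given kernel address
// is mapped with (at least) the requested protection. To only consult page
// tables it trusts, it temporarily switches cr3 to the kernel PDPT and walks
// the paging structures using the debug variants of the physical page mapper.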
bool
X86PagingMethodPAE::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
	// we can't check much without the physical page mapper
	if (fPhysicalPageMapper == NULL)
		return false;

	// We only trust the kernel team's page directories. So switch to the
	// kernel PDPT first. Always set it to make sure the TLBs don't contain
	// obsolete data.
	uint32 physicalPDPT = x86_read_cr3();
	x86_write_cr3(fKernelPhysicalPageDirPointerTable);

	// get the PDPT entry for the address
	pae_page_directory_pointer_table_entry pdptEntry = 0;
	if (physicalPDPT == fKernelPhysicalPageDirPointerTable) {
		pdptEntry = fKernelVirtualPageDirPointerTable[
			virtualAddress / kPAEPageDirRange];
	} else {
		// map the original PDPT and get the entry
		void* handle;
		addr_t virtualPDPT;
		status_t error = fPhysicalPageMapper->GetPageDebug(physicalPDPT,
			&virtualPDPT, &handle);
		if (error == B_OK) {
			pdptEntry = ((pae_page_directory_pointer_table_entry*)
				virtualPDPT)[virtualAddress / kPAEPageDirRange];
			fPhysicalPageMapper->PutPageDebug(virtualPDPT, handle);
		}
	}

	// map the page dir and get the entry
	pae_page_directory_entry pageDirEntry = 0;
	if ((pdptEntry & X86_PAE_PDPTE_PRESENT) != 0) {
		void* handle;
		addr_t virtualPageDir;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			pdptEntry & X86_PAE_PDPTE_ADDRESS_MASK, &virtualPageDir, &handle);
		if (error == B_OK) {
			pageDirEntry = ((pae_page_directory_entry*)virtualPageDir)[
				virtualAddress / kPAEPageTableRange % kPAEPageDirEntryCount];
			fPhysicalPageMapper->PutPageDebug(virtualPageDir, handle);
		}
	}

	// map the page table and get the entry
	pae_page_table_entry pageTableEntry = 0;
	if ((pageDirEntry & X86_PAE_PDE_PRESENT) != 0) {
		void* handle;
		addr_t virtualPageTable;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			pageDirEntry & X86_PAE_PDE_ADDRESS_MASK, &virtualPageTable,
			&handle);
		if (error == B_OK) {
			pageTableEntry = ((pae_page_table_entry*)virtualPageTable)[
				virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
			fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
		}
	}

	// switch back to the original page directory
	if (physicalPDPT != fKernelPhysicalPageDirPointerTable)
		x86_write_cr3(physicalPDPT);

	if ((pageTableEntry & X86_PAE_PTE_PRESENT) == 0)
		return false;

	// present means kernel-readable, so check for writable
	return (protection & B_KERNEL_WRITE_AREA) == 0
		|| (pageTableEntry & X86_PAE_PTE_WRITABLE) != 0;
}


/*static*/ void
X86PagingMethodPAE::PutPageTableInPageDir(pae_page_directory_entry* entry,
	phys_addr_t physicalTable, uint32 attributes)
{
	*entry = (physicalTable & X86_PAE_PDE_ADDRESS_MASK)
		| X86_PAE_PDE_PRESENT
		| X86_PAE_PDE_WRITABLE
		| X86_PAE_PDE_USER;
	// TODO: We ignore the attributes of the page table -- for compatibility
	// with BeOS we allow having user accessible areas in the kernel address
	// space. This is currently being used by some drivers, mainly for the
	// frame buffer. Our current real time data implementation makes use of
	// this fact, too.
	// We might want to get rid of this possibility one day, especially if
	// we intend to port it to a platform that does not support this.
}


/*static*/ void
X86PagingMethodPAE::PutPageTableEntryInTable(pae_page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	pae_page_table_entry page = (physicalAddress & X86_PAE_PTE_ADDRESS_MASK)
		| X86_PAE_PTE_PRESENT | (globalPage ? X86_PAE_PTE_GLOBAL : 0)
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) != 0) {
		page |= X86_PAE_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			page |= X86_PAE_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			page |= X86_PAE_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		page |= X86_PAE_PTE_WRITABLE;

	// put it in the page table
	*(volatile pae_page_table_entry*)entry = page;
}
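

// Allocates a physical page below 4 GB and maps it via the physical page
// mapper, returning its virtual address; the physical address and the
// mapping handle are returned through the out parameters. A small cache of
// such pages (up to kMaxFree32BitPagesCount) is kept to avoid repeated
// page-run allocations.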
void*
X86PagingMethodPAE::Allocate32BitPage(phys_addr_t& _physicalAddress,
	void*& _handle)
{
	// get a free page
	MutexLocker locker(fFreePagesLock);
	vm_page* page;
	if (fFreePages != NULL) {
		// use a cached page
		page = fFreePages;
		fFreePages = page->cache_next;
		fFreePagesCount--;
		locker.Unlock();
	} else {
		// no pages -- allocate one
		locker.Unlock();

		physical_address_restrictions restrictions = {};
		restrictions.high_address = 0x100000000LL;
		page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 1, &restrictions,
			VM_PRIORITY_SYSTEM);
		if (page == NULL)
			return NULL;

		DEBUG_PAGE_ACCESS_END(page);
	}

	// map the page
	phys_addr_t physicalAddress
		= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
	addr_t virtualAddress;
	if (fPhysicalPageMapper->GetPage(physicalAddress, &virtualAddress, &_handle)
			!= B_OK) {
		// mapping failed -- free page
		locker.Lock();
		page->cache_next = fFreePages;
		fFreePages = page;
		fFreePagesCount++;
		return NULL;
	}

	_physicalAddress = physicalAddress;
	return (void*)virtualAddress;
}


void
X86PagingMethodPAE::Free32BitPage(void* address, phys_addr_t physicalAddress,
	void* handle)
{
	// unmap the page
	fPhysicalPageMapper->PutPage((addr_t)address, handle);

	// either cache or free the page
	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
	MutexLocker locker(fFreePagesLock);
	if (fFreePagesCount < kMaxFree32BitPagesCount) {
		// cache not full yet -- cache it
		page->cache_next = fFreePages;
		fFreePages = page;
		fFreePagesCount++;
	} else {
		// cache full -- free it
		locker.Unlock();
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free(NULL, page);
	}
}


int32
X86PagingMethodPAE::_GetInitialPoolCount()
{
	int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
		+ EXTRA_SLOTS;
	return (requiredSlots + kPAEPageTableEntryCount - 1)
		/ kPAEPageTableEntryCount;
}


bool
X86PagingMethodPAE::_EarlyQuery(addr_t virtualAddress,
	phys_addr_t* _physicalAddress)
{
	pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress(
		fKernelVirtualPageDirs, virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return false;
	}

	pae_page_table_entry* entry = _EarlyGetPageTable(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
	if ((*entry & X86_PAE_PTE_PRESENT) == 0) {
		// page mapping not valid
		return false;
	}

	*_physicalAddress = *entry & X86_PAE_PTE_ADDRESS_MASK;
	return true;
}
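

// Temporarily maps the page table at the given physical address at the
// reserved free virtual slot, so it can be read and written during early
// boot. Only one table can be mapped this way at a time; InitPostArea()
// retires the slot once the physical page mapper is available.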
pae_page_table_entry*
X86PagingMethodPAE::_EarlyGetPageTable(phys_addr_t address)
{
	*fFreeVirtualSlotPTE = (address & X86_PAE_PTE_ADDRESS_MASK)
		| X86_PAE_PTE_PRESENT | X86_PAE_PTE_WRITABLE | X86_PAE_PTE_GLOBAL;

	invalidate_TLB(fFreeVirtualSlot);

	return (pae_page_table_entry*)fFreeVirtualSlot;
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64