src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp

/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/pae/X86VMTranslationMapPAE.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/pae/X86PagingMethodPAE.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_PAE
#ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#if B_HAIKU_PHYSICAL_BITS == 64
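// The PAE code is only compiled in when the kernel is built with 64 bit wide
// physical addresses: virtual addresses remain 32 bit, but the page table
// entries (and the physical addresses stored in them) are 64 bit wide.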


#if TRANSLATION_MAP_TRACING


namespace TranslationMapTracing {


class TranslationMapTraceEntryBase
	: public TRACE_ENTRY_SELECTOR(TRANSLATION_MAP_TRACING_STACK_TRACE) {
public:
	TranslationMapTraceEntryBase()
		:
		TraceEntryBase(TRANSLATION_MAP_TRACING_STACK_TRACE, 0, true)
	{
	}

	void PrintPageTableEntry(TraceOutput& out, pae_page_table_entry entry)
	{
		out.Print("%#" B_PRIx64 " %c%c%c%c%c %s %s %c%c",
			entry & X86_PAE_PTE_ADDRESS_MASK,
			(entry & X86_PAE_PTE_PRESENT) != 0 ? 'P' : '-',
			(entry & X86_PAE_PTE_WRITABLE) != 0 ? 'W' : '-',
			(entry & X86_PAE_PTE_USER) != 0 ? 'U' : '-',
			(entry & X86_PAE_PTE_NOT_EXECUTABLE) != 0 ? '-' : 'X',
			(entry & X86_PAE_PTE_GLOBAL) != 0 ? 'G' : '-',
			(entry & X86_PAE_PTE_WRITE_THROUGH) != 0 ? "WT" : "--",
			(entry & X86_PAE_PTE_CACHING_DISABLED) != 0 ? "UC" : "--",
			(entry & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(entry & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-');
	}
};


class Map : public TranslationMapTraceEntryBase {
public:
	Map(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map map: %p: %#" B_PRIxADDR " -> ", fMap,
			fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


class Unmap : public TranslationMapTraceEntryBase {
public:
	Unmap(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map unmap: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


class Protect : public TranslationMapTraceEntryBase {
public:
	Protect(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry oldEntry, pae_page_table_entry newEntry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fOldEntry(oldEntry),
		fNewEntry(newEntry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map protect: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fNewEntry);
		out.Print(" (%c%c%c)",
			(fOldEntry & X86_PAE_PTE_WRITABLE) != 0 ? 'W' : '-',
			(fOldEntry & X86_PAE_PTE_USER) != 0 ? 'U' : '-',
			(fOldEntry & X86_PAE_PTE_NOT_EXECUTABLE) != 0 ? '-' : 'X');
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fOldEntry;
	pae_page_table_entry	fNewEntry;
};


class ClearFlags : public TranslationMapTraceEntryBase {
public:
	ClearFlags(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry oldEntry, pae_page_table_entry flagsCleared)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fOldEntry(oldEntry),
		fFlagsCleared(flagsCleared)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map clear flags: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fOldEntry & ~fFlagsCleared);
		out.Print(", cleared %c%c (%c%c)",
			(fOldEntry & fFlagsCleared & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(fOldEntry & fFlagsCleared & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-',
			(fFlagsCleared & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(fFlagsCleared & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-');
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fOldEntry;
	pae_page_table_entry	fFlagsCleared;
};


class ClearFlagsUnmap : public TranslationMapTraceEntryBase {
public:
	ClearFlagsUnmap(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map clear flags unmap: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


}	// namespace TranslationMapTracing


#	define T(x)	new(std::nothrow) TranslationMapTracing::x

#else
#	define T(x)
#endif	// TRANSLATION_MAP_TRACING


X86VMTranslationMapPAE::X86VMTranslationMapPAE()
	:
	fPagingStructures(NULL)
{
}


X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	// cycle through and free all of the user space page tables

	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
		// assuming 1-1 split of the address space

	for (uint32 k = 0; k < 2; k++) {
		pae_page_directory_entry* pageDir
			= fPagingStructures->VirtualPageDirs()[k];
		if (pageDir == NULL)
			continue;

		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
						"didn't find page table page: page address: %#"
						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
						address,
						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
X86VMTranslationMapPAE::Init(bool kernel)
{
	TRACE("X86VMTranslationMapPAE::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();

	if (kernel) {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
			method->KernelPhysicalPageDirPointerTable(), NULL,
			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
	} else {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// The following code assumes that the kernel address space occupies
		// the upper half of the virtual address space. This simplifies things
		// a lot, since it allows us to just use the upper two page directories
		// of the kernel and create two new lower page directories for the
		// userland.
		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);

		// allocate the page directories (both at once)
		pae_page_directory_entry* virtualPageDirs[4];
		phys_addr_t physicalPageDirs[4];
		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
			2 * B_PAGE_SIZE);
		if (virtualPageDirs[0] == NULL)
			return B_NO_MEMORY;
		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;

		// clear the userland page directories
		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);

		// use the upper two kernel page directories
		for (int32 i = 2; i < 4; i++) {
			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
		}

		// look up the page directories' physical addresses
		for (int32 i = 0; i < 2; i++) {
			vm_get_page_mapping(VMAddressSpace::KernelID(),
				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
		}

		// allocate the PDPT -- needs to have a 32 bit physical address
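		// (in PAE mode CR3 still holds only a 32 bit physical address, so the
		// PDPT itself must reside below 4 GB, even though the page directories
		// and page tables it points to may live anywhere in physical memory)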
		phys_addr_t physicalPDPT;
		void* pdptHandle;
		pae_page_directory_pointer_table_entry* pdpt
			= (pae_page_directory_pointer_table_entry*)
				method->Allocate32BitPage(physicalPDPT, pdptHandle);
		if (pdpt == NULL) {
			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}

		// init the PDPT entries
		for (int32 i = 0; i < 4; i++) {
			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
				| X86_PAE_PDPTE_PRESENT;
		}

		// init the paging structures
		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
			physicalPageDirs);
	}

	return B_OK;
}


size_t
X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = kPAEPageTableRange - B_PAGE_SIZE;
		end += kPAEPageTableRange - B_PAGE_SIZE;
	}

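	// Each page table covers kPAEPageTableRange bytes (512 entries of
	// B_PAGE_SIZE each), so the result is the number of distinct page table
	// ranges touched by [start, end].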
	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
}


status_t
X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
		"\n", virtualAddress, physicalAddress);

	// check to see if a page table exists for this range
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// we need to allocate a page table
		vm_page *page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);

		// put it in the page dir
		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
			physicalPageTable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
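			// The page directory entry is deliberately made writable: on x86
			// an access is only allowed when both the directory and the table
			// entry permit it, so the actual restrictions are enforced by the
			// individual page table entries below.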

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
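	// index of the entry within its page table -- each table holds
	// kPAEPageTableEntryCount (512) entries, one per page of its 2 MB range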
	pae_page_table_entry* entry = pageTable
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;

	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64 " @ %p",
		virtualAddress, *entry, entry);

	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	T(Map(this, virtualAddress, *entry));

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}


status_t
X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		"\n", start, end);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
				B_PRIxADDR "\n", start);

			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);

			T(Unmap(this, start, oldEntry));

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
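		// The "start != 0" check catches the wrap-around to 0 that occurs
		// after the last page of the address space has been processed.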

	return B_OK;
}


status_t
X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethodPAE::SetPageTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				pae_page_table_entry oldEntry
					= X86PagingMethodPAE::ClearPageTableEntryFlags(
						&pageTable[index], X86_PAE_PTE_PRESENT);

				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);

	T(Unmap(this, address, oldEntry));

	pinner.Unlock();

	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


void
X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntry(&pageTable[index]);
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
				continue;

			T(Unmap(this, start, oldEntry));

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, since in all cases this method is used the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
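		// If the whole address space is going away and the accessed/dirty
		// flags of the top cache's pages no longer matter, we can skip
		// clearing the individual page table entries; mappings of pages from
		// lower caches are still cleared explicitly below.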

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntry(
					&pageTable[address / B_PAGE_SIZE
						% kPAEPageTableEntryCount]);

			pinner.Unlock();

			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			T(Unmap(this, address, oldEntry));

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		} else {
#if TRANSLATION_MAP_TRACING
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) != 0) {
				pae_page_table_entry* pageTable
					= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
						*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
				pae_page_table_entry oldEntry = pageTable[
					address / B_PAGE_SIZE % kPAEPageTableEntryCount];

				pinner.Unlock();

				if ((oldEntry & X86_PAE_PTE_PRESENT) != 0)
					T(Unmap(this, address, oldEntry));
			}
#endif
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
X86VMTranslationMapPAE::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// get the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	pinner.Unlock();

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
			? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
			? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		", attributes: %#" B_PRIx32 "\n", start, end, attributes);

	// compute protection/memory type flags
	uint64 newFlags
		= X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(memoryType);
	if ((attributes & B_USER_PROTECTION) != 0) {
		newFlags |= X86_PAE_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newFlags |= X86_PAE_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			newFlags |= X86_PAE_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newFlags |= X86_PAE_PTE_WRITABLE;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry entry = pageTable[index];
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
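			// (another CPU may set the accessed/dirty flags concurrently,
			// hence the compare-and-swap retry loop below)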
			pae_page_table_entry oldEntry;
			while (true) {
				oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
					&pageTable[index],
					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
							| X86_PAE_PTE_MEMORY_TYPE_MASK))
						| newFlags,
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			T(Protect(this, start, entry,
				(entry & ~(X86_PAE_PTE_PROTECTION_MASK
						| X86_PAE_PTE_MEMORY_TYPE_MASK))
					| newFlags));

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
{
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
		+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// clear out the flags we've been requested to clear
	pae_page_table_entry oldEntry
		= X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);

	pinner.Unlock();

	T(ClearFlags(this, address, oldEntry, flagsToClear));

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


bool
X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
		+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// perform the deed
	pae_page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & X86_PAE_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
				T(ClearFlags(this, address, oldEntry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY));
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				T(ClearFlagsUnmap(this, address, oldEntry));
				break;
			}

			// something changed -- check again
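			// (the compare-and-swap failed, most likely because another CPU
			// set the accessed or dirty flag in the meantime)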
		}
	} else {
		oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
		T(ClearFlags(this, address, oldEntry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY));
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


void
X86VMTranslationMapPAE::DebugPrintMappingInfo(addr_t virtualAddress)
{
	// get the page directory
	pae_page_directory_entry* const* pdpt
		= fPagingStructures->VirtualPageDirs();
	pae_page_directory_entry* pageDirectory = pdpt[virtualAddress >> 30];
	kprintf("page directory: %p (PDPT[%zu])\n", pageDirectory,
		virtualAddress >> 30);

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(pdpt, virtualAddress);
	kprintf("page directory entry %zu (%p): %#" B_PRIx64 "\n",
		pageDirEntry - pageDirectory, pageDirEntry, *pageDirEntry);

	kprintf(" access: ");
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) != 0)
		kprintf(" present");
	if ((*pageDirEntry & X86_PAE_PDE_WRITABLE) != 0)
		kprintf(" writable");
	if ((*pageDirEntry & X86_PAE_PDE_USER) != 0)
		kprintf(" user");
	if ((*pageDirEntry & X86_PAE_PDE_NOT_EXECUTABLE) == 0)
		kprintf(" executable");
	if ((*pageDirEntry & X86_PAE_PDE_LARGE_PAGE) != 0)
		kprintf(" large");

	kprintf("\n caching:");
	if ((*pageDirEntry & X86_PAE_PDE_WRITE_THROUGH) != 0)
		kprintf(" write-through");
	if ((*pageDirEntry & X86_PAE_PDE_CACHING_DISABLED) != 0)
		kprintf(" uncached");

	kprintf("\n flags: ");
	if ((*pageDirEntry & X86_PAE_PDE_ACCESSED) != 0)
		kprintf(" accessed");
	kprintf("\n");

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return;

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	kprintf("page table: %#" B_PRIx64 "\n",
		*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	size_t pteIndex = virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
	pae_page_table_entry entry = pageTable[pteIndex];
	kprintf("page table entry %zu (phys: %#" B_PRIx64 "): %#" B_PRIx64 "\n",
		pteIndex,
		(*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ pteIndex * sizeof(pae_page_table_entry),
		entry);

	kprintf(" access: ");
	if ((entry & X86_PAE_PTE_PRESENT) != 0)
		kprintf(" present");
	if ((entry & X86_PAE_PTE_WRITABLE) != 0)
		kprintf(" writable");
	if ((entry & X86_PAE_PTE_USER) != 0)
		kprintf(" user");
	if ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0)
		kprintf(" executable");
	if ((entry & X86_PAE_PTE_GLOBAL) != 0)
		kprintf(" global");

	kprintf("\n caching:");
	if ((entry & X86_PAE_PTE_WRITE_THROUGH) != 0)
		kprintf(" write-through");
	if ((entry & X86_PAE_PTE_CACHING_DISABLED) != 0)
		kprintf(" uncached");
	if ((entry & X86_PAE_PTE_PAT) != 0)
		kprintf(" PAT");

	kprintf("\n flags: ");
	if ((entry & X86_PAE_PTE_ACCESSED) != 0)
		kprintf(" accessed");
	if ((entry & X86_PAE_PTE_DIRTY) != 0)
		kprintf(" dirty");
	kprintf("\n");

	if ((entry & X86_PAE_PTE_PRESENT) != 0) {
		kprintf(" address: %#" B_PRIx64 "\n",
			entry & X86_PAE_PTE_ADDRESS_MASK);
	}
}


bool
X86VMTranslationMapPAE::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	pae_page_directory_entry* const* pdpt
		= fPagingStructures->VirtualPageDirs();
	for (uint32 pageDirIndex = fIsKernelMap ? 2 : 0;
			pageDirIndex < uint32(fIsKernelMap ? 4 : 2); pageDirIndex++) {
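		// kernel maps only own the upper two page directories (the upper 2 GB
		// of the address space), user maps only the lower two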
		// iterate through the page directory
		pae_page_directory_entry* pageDirectory = pdpt[pageDirIndex];
		for (uint32 pdeIndex = 0; pdeIndex < kPAEPageDirEntryCount;
				pdeIndex++) {
			pae_page_directory_entry& pageDirEntry = pageDirectory[pdeIndex];
			if ((pageDirEntry & X86_PAE_PDE_ADDRESS_MASK) == 0)
				continue;

			// get and iterate through the page table
			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)X86PagingMethodPAE::Method()
					->PhysicalPageMapper()->InterruptGetPageTableAt(
						pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			for (uint32 pteIndex = 0; pteIndex < kPAEPageTableEntryCount;
					pteIndex++) {
				pae_page_table_entry entry = pageTable[pteIndex];
				if ((entry & X86_PAE_PTE_PRESENT) != 0
					&& (entry & X86_PAE_PTE_ADDRESS_MASK) == physicalAddress) {
					addr_t virtualAddress = pageDirIndex * kPAEPageDirRange
						+ pdeIndex * kPAEPageTableRange
						+ pteIndex * B_PAGE_SIZE;
					if (callback.HandleVirtualAddress(virtualAddress))
						return true;
				}
			}
		}
	}

	return false;
}


X86PagingStructures*
X86VMTranslationMapPAE::PagingStructures() const
{
	return fPagingStructures;
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64