/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/64bit/X86VMTranslationMap64Bit.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/64bit/X86PagingMethod64Bit.h"
#include "paging/64bit/X86PagingStructures64Bit.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_64BIT
#ifdef TRACE_X86_VM_TRANSLATION_MAP_64BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// #pragma mark - X86VMTranslationMap64Bit


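// This translation map works on the standard four-level x86-64 paging
// hierarchy: PML4 -> PDPT -> page directory -> page table, with 512 8-byte
// entries per table. The lower 256 PML4 entries describe user address space;
// the kernel occupies the top PML4 entries, which are shared with the
// kernel's own PML4 (see Init() below).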
X86VMTranslationMap64Bit::X86VMTranslationMap64Bit()
	:
	fPagingStructures(NULL)
{
}


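// The destructor walks the bottom (user) half of the PML4 and frees every
// page table, page directory and PDPT page that was allocated for this
// address space, then releases the reference to the paging structures. The
// top half is shared with the kernel and is deliberately left untouched.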
X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit()
{
	TRACE("X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit()\n");

	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL) {
		phys_addr_t address;
		vm_page* page;

		// Free all structures in the bottom half of the PML4 (user memory).
		uint64* virtualPML4 = fPagingStructures->VirtualPML4();
		for (uint32 i = 0; i < 256; i++) {
			if ((virtualPML4[i] & X86_64_PML4E_PRESENT) == 0)
				continue;

			uint64* virtualPDPT = (uint64*)fPageMapper->GetPageTableAt(
				virtualPML4[i] & X86_64_PML4E_ADDRESS_MASK);
			for (uint32 j = 0; j < 512; j++) {
				if ((virtualPDPT[j] & X86_64_PDPTE_PRESENT) == 0)
					continue;

				uint64* virtualPageDir = (uint64*)fPageMapper->GetPageTableAt(
					virtualPDPT[j] & X86_64_PDPTE_ADDRESS_MASK);
				for (uint32 k = 0; k < 512; k++) {
					if ((virtualPageDir[k] & X86_64_PDE_PRESENT) == 0)
						continue;

					address = virtualPageDir[k] & X86_64_PDE_ADDRESS_MASK;
					page = vm_lookup_page(address / B_PAGE_SIZE);
					if (page == NULL) {
						panic("page table %u %u %u on invalid page %#"
							B_PRIxPHYSADDR "\n", i, j, k, address);
					}

					DEBUG_PAGE_ACCESS_START(page);
					vm_page_set_state(page, PAGE_STATE_FREE);
				}

				address = virtualPDPT[j] & X86_64_PDPTE_ADDRESS_MASK;
				page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL) {
					panic("page directory %u %u on invalid page %#"
						B_PRIxPHYSADDR "\n", i, j, address);
				}

				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}

			address = virtualPML4[i] & X86_64_PML4E_ADDRESS_MASK;
			page = vm_lookup_page(address / B_PAGE_SIZE);
			if (page == NULL) {
				panic("PDPT %u on invalid page %#" B_PRIxPHYSADDR "\n", i,
					address);
			}

			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_FREE);
		}

		fPageMapper->Delete();
	}

	fPagingStructures->RemoveReference();
}


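// Init() sets up the paging structures for this map. The kernel map simply
// reuses the already mapped kernel PML4, while user maps get a freshly
// allocated, zeroed PML4 whose top two entries are copied from the kernel
// PML4 so that kernel space stays mapped in every address space.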
status_t
X86VMTranslationMap64Bit::Init(bool kernel)
{
	TRACE("X86VMTranslationMap64Bit::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructures64Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethod64Bit* method = X86PagingMethod64Bit::Method();

	if (kernel) {
		// Get the page mapper.
		fPageMapper = method->KernelPhysicalPageMapper();

		// Kernel PML4 is already mapped.
		fPagingStructures->Init(method->KernelVirtualPML4(),
			method->KernelPhysicalPML4());
	} else {
		// Allocate a physical page mapper.
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// Assuming that only the top 2 PML4 entries are occupied for the
		// kernel.
		STATIC_ASSERT(KERNEL_PMAP_BASE == 0xffffff0000000000);
		STATIC_ASSERT(KERNEL_BASE == 0xffffff0000000000);

		// Allocate and clear the PML4.
		uint64* virtualPML4 = (uint64*)memalign(B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPML4 == NULL)
			return B_NO_MEMORY;
		memset(virtualPML4, 0, B_PAGE_SIZE);

		// Copy the top 2 PML4 entries.
		virtualPML4[510] = method->KernelVirtualPML4()[510];
		virtualPML4[511] = method->KernelVirtualPML4()[511];

		// Look up the PML4 physical address.
		phys_addr_t physicalPML4;
		vm_get_page_mapping(VMAddressSpace::KernelID(), (addr_t)virtualPML4,
			&physicalPML4);

		// Initialize the paging structures.
		fPagingStructures->Init(virtualPML4, physicalPML4);
	}

	return B_OK;
}


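// Worst-case number of structure pages (PDPTs, page directories and page
// tables) that may have to be allocated in order to map the given range.
// Each level is counted separately: the range needs one table per
// k64Bit*Range-sized window it touches.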
size_t
X86VMTranslationMap64Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case, which is where the start address is
	// the last page covered by a PDPT.
	if (start == 0) {
		start = k64BitPDPTRange - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredPDPTs = end / k64BitPDPTRange + 1
		- start / k64BitPDPTRange;
	size_t requiredPageDirs = end / k64BitPageDirectoryRange + 1
		- start / k64BitPageDirectoryRange;
	size_t requiredPageTables = end / k64BitPageTableRange + 1
		- start / k64BitPageTableRange;

	return requiredPDPTs + requiredPageDirs + requiredPageTables;
}


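// Maps a single page. The page table entry is looked up (allocating new
// tables from the reservation if necessary) and filled in; no TLB
// invalidation is needed, since the entry was not present before.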
status_t
X86VMTranslationMap64Bit::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMap64Bit::Map(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address, allocating new tables
	// if required. Shouldn't fail.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
		true, reservation, fPageMapper, fMapCount);
	ASSERT(entry != NULL);

	// The entry should not already exist.
	ASSERT_PRINT((*entry & X86_64_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *entry);

	// Fill in the table entry.
	X86PagingMethod64Bit::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	// Note: We don't need to invalidate the TLB for this address, as the
	// entry was not present before and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}


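// Unmaps the given address range by clearing the present flag of every
// mapped entry. Accessed/dirty state is not transferred to the vm_page
// objects here; that is what UnmapPage()/UnmapPages() below are for.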
status_t
X86VMTranslationMap64Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMap64Bit::Unmap(%#" B_PRIxADDR ", %#" B_PRIxADDR
		")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_64_PTE_PRESENT) == 0)
				continue;

			TRACE("X86VMTranslationMap64Bit::Unmap(): removing page %#"
				B_PRIxADDR " (%#" B_PRIxPHYSADDR ")\n", start,
				pageTable[index] & X86_64_PTE_ADDRESS_MASK);

			uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(
				&pageTable[index], X86_64_PTE_PRESENT);
			fMapCount--;

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


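// Debug helper: toggles the present flag on already existing entries in the
// given range without touching the rest of the entry, presumably so that a
// range can be temporarily hidden and later restored for debugging purposes.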
status_t
X86VMTranslationMap64Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMap64Bit::DebugMarkRangePresent(%#" B_PRIxADDR
		", %#" B_PRIxADDR ")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_64_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod64Bit::SetTableEntryFlags(&pageTable[index],
					X86_64_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(
					&pageTable[index], X86_64_PTE_PRESENT);

				if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


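// Unmaps a single page of an area and propagates the accessed/dirty bits of
// the old entry to the "high level" VM via PageUnmapped(), which also takes
// care of unlocking the map for us.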
status_t
X86VMTranslationMap64Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMap64Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), address, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);

	pinner.Unlock();

	if ((oldEntry & X86_64_PTE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	fMapCount--;

	if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly, FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_64_PTE_ACCESSED) != 0,
		(oldEntry & X86_64_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


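// Unmaps a whole range of an area. For non-device areas the accessed/dirty
// flags are transferred to the vm_page, and the mapping objects are
// collected in a local queue and freed after the map lock has been released.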
void
X86VMTranslationMap64Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMap64Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(
				&pageTable[index]);
			if ((oldEntry & X86_64_PTE_PRESENT) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_64_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_64_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, as in all cases where this method is used the unmapped
	// range is unmapped for good (resized/cut) and the pages will likely be
	// freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


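// Unmaps an entire area. Device and wired areas are simply forwarded to
// UnmapPages(); in the common case the area's mapping list is walked instead
// of the page tables. When the whole address space is being deleted and top
// cache page flags can be ignored, clearing the individual entries is
// skipped where possible.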
void
X86VMTranslationMap64Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("X86VMTranslationMap64Bit::UnmapArea(%p)\n", area);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMap64Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
				fPagingStructures->VirtualPML4(), address, fIsKernelMap,
				false, NULL, fPageMapper, fMapCount);
			if (entry == NULL) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table", page, area, address);
				continue;
			}

			uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);

			if ((oldEntry & X86_64_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_64_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


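// Query() returns the physical address and protection/state flags for a
// virtual address. Since it may be called on the physical map area, 2 MB
// large pages (PDEs with the large-page flag set) are handled as well.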
status_t
X86VMTranslationMap64Bit::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	// This function may be called on the physical map area, so we must handle
	// large pages here. Look up the page directory entry for the virtual
	// address.
	uint64* pde = X86PagingMethod64Bit::PageDirectoryEntryForAddress(
		fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (pde == NULL || (*pde & X86_64_PDE_PRESENT) == 0)
		return B_OK;

	uint64 entry;
	if ((*pde & X86_64_PDE_LARGE_PAGE) != 0) {
		entry = *pde;
		*_physicalAddress = (entry & X86_64_PDE_ADDRESS_MASK)
			+ (virtualAddress % 0x200000);
	} else {
		uint64* virtualPageTable = (uint64*)fPageMapper->GetPageTableAt(
			*pde & X86_64_PDE_ADDRESS_MASK);
		entry = virtualPageTable[VADDR_TO_PTE(virtualAddress)];
		*_physicalAddress = entry & X86_64_PTE_ADDRESS_MASK;
	}

	// Translate the page state flags.
	if ((entry & X86_64_PTE_USER) != 0) {
		*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_64_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_64_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_64_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMap64Bit::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR " %#" B_PRIx32 " (entry: %#" B_PRIx64 ")\n",
		virtualAddress, *_physicalAddress, *_flags, entry);

	return B_OK;
}


status_t
X86VMTranslationMap64Bit::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// With our page mapper, there is no difference in getting a page table
	// when interrupts are enabled or disabled, so just call Query().
	return Query(virtualAddress, _physicalAddress, _flags);
}


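// Changes the protection and memory type of all mapped pages in the range.
// Each entry is updated with a compare-and-swap loop so that hardware-set
// accessed/dirty bits are not lost while the entry is being rewritten.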
status_t
X86VMTranslationMap64Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMap64Bit::Protect(%#" B_PRIxADDR ", %#" B_PRIxADDR
		", %#" B_PRIx32 ")\n", start, end, attributes);

	// compute protection flags
	uint64 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = X86_64_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= X86_64_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			newProtectionFlags |= X86_64_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = X86_64_PTE_WRITABLE;

	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			uint64 entry = pageTable[index];
			if ((entry & X86_64_PTE_PRESENT) == 0)
				continue;

			TRACE("X86VMTranslationMap64Bit::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			uint64 oldEntry;
			while (true) {
				oldEntry = X86PagingMethod64Bit::TestAndSetTableEntry(
					&pageTable[index],
					(entry & ~(X86_64_PTE_PROTECTION_MASK
							| X86_64_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


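// Clears the requested accessed/modified flags from a single entry and
// invalidates the TLB entry if one of them was actually set.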
status_t
X86VMTranslationMap64Bit::ClearFlags(addr_t address, uint32 flags)
{
	TRACE("X86VMTranslationMap64Bit::ClearFlags(%#" B_PRIxADDR ", %#" B_PRIx32
		")\n", address, flags);

	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), address, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return B_OK;

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_64_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_64_PTE_ACCESSED : 0);

	uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
		flagsToClear);

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


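// Atomically reads and clears the accessed/dirty bits of a page's entry.
// With unmapIfUnaccessed set, an entry whose accessed bit is clear is
// unmapped right away (via a compare-and-swap against the old value) and
// UnaccessedPageUnmapped() is notified. Returns whether the page was
// accessed; the dirty state is reported through _modified.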
bool
X86VMTranslationMap64Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMap64Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), address, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return false;

	uint64 oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_64_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & X86_64_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
					X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethod64Bit::TestAndSetTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
			X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_64_PTE_DIRTY) != 0;

	if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


X86PagingStructures*
X86VMTranslationMap64Bit::PagingStructures() const
{
	return fPagingStructures;
}