/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm/VMTranslationMap.h>

#include <slab/Slab.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>


// #pragma mark - VMTranslationMap


VMTranslationMap::VMTranslationMap()
	:
	fMapCount(0)
{
	recursive_lock_init(&fLock, "translation map");
}


VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}


status_t
VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	return B_NOT_SUPPORTED;
}


/*!	Unmaps a range of pages of an area.

	The default implementation just iterates over all virtual pages of the
	range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue);
#endif
}


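// A minimal usage sketch (illustrative only, compiled out): callers are
// expected to hold the translation map's lock around the call; "map" and
// "area" stand in for a valid VMTranslationMap and one of its areas.
#if 0
static void
unmap_area_range_example(VMTranslationMap* map, VMArea* area)
{
	map->Lock();
	// unmap the area's whole range; "true" lets UnmapPage() requeue the
	// affected pages
	map->UnmapPages(area, area->Base(), area->Size(), true);
	map->Unlock();
}
#endif

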
/*!	Unmaps all of an area's pages.
	If \a deletingAddressSpace is \c true, the address space the area belongs
	to is in the process of being destroyed and isn't used by anyone anymore.
	For some architectures this can be used for optimizations (e.g. not
	unmapping pages or at least not needing to invalidate TLB entries).
	If \a ignoreTopCachePageFlags is \c true, the area is in the process of
	being destroyed and its top cache is otherwise unreferenced, i.e. all
	mapped pages that live in the top cache are going to be freed and the
	page accessed and modified flags don't need to be propagated.

	The default implementation just iterates over all virtual pages of the
	area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	addr_t address = area->Base();
	addr_t end = address + area->Size();
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, true);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, true);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, true);
#endif
}


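// Sketch of how an architecture might override UnmapArea() to exploit
// \a deletingAddressSpace (hypothetical class, compiled out): when the
// address space is going away anyway, the per-page TLB invalidation can be
// skipped entirely.
#if 0
void
SomeArchTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (deletingAddressSpace) {
		// No one can use the address space anymore, so the paging
		// structures can be torn down without invalidating individual TLB
		// entries. (The architecture-specific teardown and the freeing of
		// the page mapping objects would go here.)
		return;
	}

	// otherwise fall back to the generic per-page unmapping
	VMTranslationMap::UnmapArea(area, deletingAddressSpace,
		ignoreTopCachePageFlags);
}
#endif

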
/*!	Print mapping information for a virtual address.
	The method navigates the paging structures and prints all relevant
	information on the way.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param virtualAddress The virtual address to look up.
*/
void
VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
}


/*!	Find virtual addresses mapped to the given physical address.
	For each virtual address the method finds, it invokes the callback
	object's HandleVirtualAddress() method. When that method returns \c true,
	the search is terminated and \c true is returned.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param physicalAddress The physical address to search for.
	\param callback Callback object to be notified of each found virtual
		address.
	\return \c true, if for a found virtual address the callback's
		HandleVirtualAddress() returned \c true, \c false otherwise.
*/
bool
VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	return false;
}


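// A minimal callback sketch (illustrative only, compiled out): remembers the
// first virtual address found for the physical address and terminates the
// search by returning true from HandleVirtualAddress().
#if 0
struct FirstMappingCallback : VMTranslationMap::ReverseMappingInfoCallback {
	addr_t	foundAddress;

	FirstMappingCallback()
		:
		foundAddress(0)
	{
	}

	virtual bool HandleVirtualAddress(addr_t virtualAddress)
	{
		// remember the address and stop searching
		foundAddress = virtualAddress;
		return true;
	}
};

// usage, e.g. from a KDL command:
//	FirstMappingCallback callback;
//	if (map->DebugGetReverseMappingInfo(physicalAddress, callback))
//		kprintf("mapped at %#" B_PRIxADDR "\n", callback.foundAddress);
#endif

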
/*!	Called by UnmapPage() after performing the architecture-specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}


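// Sketch of the expected calling convention for an architecture's
// UnmapPage() (hypothetical code, compiled out): the mapping is cleared with
// fLock held, the accessed/modified bits of the old entry are extracted, and
// PageUnmapped() finishes the generic bookkeeping and unlocks fLock.
// ClearPageTableEntry() and the kPresentFlag/kAccessedFlag/kModifiedFlag/
// kAddressMask constants are made-up stand-ins for the real paging structure
// accessors.
#if 0
status_t
SomeArchTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	recursive_lock_lock(&fLock);

	uint64 oldEntry = ClearPageTableEntry(address);
	if ((oldEntry & kPresentFlag) == 0) {
		recursive_lock_unlock(&fLock);
		return B_ENTRY_NOT_FOUND;
	}

	// ... invalidate the TLB entry here, if necessary ...

	// PageUnmapped() unlocks fLock for us
	PageUnmapped(area, (oldEntry & kAddressMask) / B_PAGE_SIZE,
		(oldEntry & kAccessedFlag) != 0, (oldEntry & kModifiedFlag) != 0,
		updatePageQueue);
	return B_OK;
}
#endif

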
/*!	Called by ClearAccessedAndModified() after performing the
	architecture-specific part.
	Looks up the page and removes the page-area mapping.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}


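// Sketch of the expected caller (hypothetical code, compiled out): an
// architecture's ClearAccessedAndModified() that finds the page unaccessed
// and was asked to unmap it removes the mapping and lets
// UnaccessedPageUnmapped() do the generic bookkeeping; like PageUnmapped(),
// that method unlocks fLock. ClearAccessedFlags(), RemovePageTableEntry(),
// and the k*Flag/kAddressMask constants are made-up stand-ins.
#if 0
bool
SomeArchTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	recursive_lock_lock(&fLock);

	uint64 oldEntry = ClearAccessedFlags(address);
	_modified = (oldEntry & kModifiedFlag) != 0;

	if ((oldEntry & kAccessedFlag) != 0) {
		// the page was accessed; keep the mapping
		recursive_lock_unlock(&fLock);
		return true;
	}

	if (!unmapIfUnaccessed) {
		recursive_lock_unlock(&fLock);
		return false;
	}

	RemovePageTableEntry(address);
	UnaccessedPageUnmapped(area, (oldEntry & kAddressMask) / B_PAGE_SIZE);
		// unlocks fLock for us
	return false;
}
#endif

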
// #pragma mark - ReverseMappingInfoCallback


VMTranslationMap::ReverseMappingInfoCallback::~ReverseMappingInfoCallback()
{
}


// #pragma mark - VMPhysicalPageMapper


VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}


VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}