/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm/VMCache.h>

#include <condition_variable.h>
#include <slab/Slab.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

// needed for the factory only
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"
//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
	// The lock is also needed when the debug feature is disabled.

ObjectCache* gCacheRefObjectCache;
ObjectCache* gAnonymousCacheObjectCache;
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;


struct VMCache::PageEventWaiter {
	Thread*				thread;
	PageEventWaiter*	next;
	vm_page*			page;
	uint32				events;
};
#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
#if VM_CACHE_TRACING_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				VM_CACHE_TRACING_STACK_TRACE, 0, true);
				// Don't capture userland stack trace to avoid potential
				// deadlocks.
#endif
		}

#if VM_CACHE_TRACING_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache*	fCache;
#if VM_CACHE_TRACING_STACK_TRACE
		tracing_stack_trace*	fStackTrace;
#endif
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
				fOldCommitment, fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

		VMArea* Area() const
		{
			return fArea;
		}

	private:
		VMArea*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing
#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
				B_PRIdOFF, fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#		define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#		define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif
//	#pragma mark - debugger commands


#if VM_CACHE_TRACING


static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// find the previous "insert area" entry for the given area
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	// find the previous "add consumer" or "create" entry for the given cache
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static int
command_cache_stack(int argc, char** argv)
{
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);
	if (isArea) {
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf("  cache not found\n");
			return 0;
		}
	}

	while (address != NULL) {
		kprintf("  %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}


#endif	// VM_CACHE_TRACING
status_t
vm_cache_init(kernel_args* args)
{
	// Create object caches for the structures we allocate here.
	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
		0, NULL, NULL, NULL);
	gAnonymousCacheObjectCache = create_object_cache("anon caches",
		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
	gAnonymousNoSwapCacheObjectCache = create_object_cache(
		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
		NULL);
	gVnodeCacheObjectCache = create_object_cache("vnode caches",
		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
	gDeviceCacheObjectCache = create_object_cache("device caches",
		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
	gNullCacheObjectCache = create_object_cache("null caches",
		sizeof(VMNullCache), 0, NULL, NULL, NULL);

	if (gCacheRefObjectCache == NULL || gAnonymousCacheObjectCache == NULL
		|| gAnonymousNoSwapCacheObjectCache == NULL
		|| gVnodeCacheObjectCache == NULL
		|| gDeviceCacheObjectCache == NULL
		|| gNullCacheObjectCache == NULL) {
		panic("vm_cache_init(): Failed to create object caches!");
		return B_NO_MEMORY;
	}

	return B_OK;
}


status_t
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif	// VM_CACHE_TRACING

	return B_OK;
}
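

// Illustrative usage note (not part of the original source): with
// VM_CACHE_TRACING enabled, the debugger command registered above could be
// invoked from the kernel debugger roughly as
//
//	cache_stack area 0x80123456 12345
//
// where the area address and the tracing entry index are hypothetical values.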
VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	mutex_lock(&sCacheListLock);

	while (dontWait) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->TryLock()) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		if (cacheRef == page->CacheRef()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}

	while (true) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->SwitchLock(&sCacheListLock)) {
			// cache has been deleted
			mutex_lock(&sCacheListLock);
			continue;
		}

		mutex_lock(&sCacheListLock);
		if (cache == page->Cache()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}
}
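

// Illustrative sketch (not part of the original source): how a caller might
// use vm_cache_acquire_locked_page_cache(). The helper name and the page
// handling are hypothetical; the point is that the function returns the
// cache locked and referenced, so the caller has to undo both.
#if 0
static void
example_inspect_page_cache(vm_page* page)
{
	VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
		// dontWait == true: give up instead of blocking if the cache lock
		// cannot be taken right away
	if (cache == NULL)
		return;

	// ... look at the page while its cache is locked ...

	cache->ReleaseRefAndUnlock();
		// drop the reference and the lock acquired above
}
#endif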
// #pragma mark - VMCacheRef


VMCacheRef::VMCacheRef(VMCache* cache)
	:
	cache(cache),
	ref_count(1)
{
}


// #pragma mark - VMCache
bool
VMCache::_IsMergeable() const
{
	return areas == NULL && temporary && !consumers.IsEmpty()
		&& consumers.Head() == consumers.Tail();
}


VMCache::~VMCache()
{
	object_cache_delete(gCacheRefObjectCache, fCacheRef);
}


status_t
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
{
	mutex_init(&fLock, "VMCache");

	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	page_count = 0;
	fWiredPagesCount = 0;
	type = cacheType;
	fPageEventWaiters = NULL;

#if DEBUG_CACHE_LIST
	debug_previous = NULL;
	debug_next = NULL;
		// initialize in case the following fails
#endif

	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
	if (fCacheRef == NULL)
		return B_NO_MEMORY;

#if DEBUG_CACHE_LIST
	mutex_lock(&sCacheListLock);

	if (gDebugCacheList != NULL)
		gDebugCacheList->debug_previous = this;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	mutex_unlock(&sCacheListLock);
#endif

	return B_OK;
}
void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!consumers.IsEmpty())
		panic("cache %p to be deleted still has consumers", this);

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n"
				"@!page %p; cache %p", page, this, page, this);
		}

		// remove it
		pages.Remove(page);
		page->SetCacheRef(NULL);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
	// not enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	mutex_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	mutex_unlock(&sCacheListLock);

	delete this;
}


void
VMCache::Unlock(bool consumerLocked)
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = consumers.Head();
		if (consumerLocked) {
			_MergeWithOnlyConsumer();
		} else if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
			consumer->Unlock();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLockedTemp) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == consumers.Head()) {
					// nothing has changed in the meantime -- merge
					_MergeWithOnlyConsumer();
				}

				consumer->Unlock();
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}
vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->Cache() != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
		this, page, offset));
	AssertLocked();

	if (page->CacheRef() != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->Cache());
		return;
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->SetCacheRef(fCacheRef);

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
			otherPage, page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);

	if (page->WiredCount() > 0)
		IncrementWiredPagesCount();
}
/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->Cache() != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->Cache());
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page_count--;
	page->SetCacheRef(NULL);

	if (page->WiredCount() > 0)
		DecrementWiredPagesCount();
}


/*!	Moves the given page from its current cache and inserts it into this cache.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
	VMCache* oldCache = page->Cache();

	AssertLocked();
	oldCache->AssertLocked();

	// remove from old cache
	oldCache->pages.Remove(page);
	oldCache->page_count--;
	T2(RemovePage(oldCache, page));

	// insert here
	pages.Insert(page);
	page_count++;
	page->SetCacheRef(fCacheRef);

	if (page->WiredCount() > 0) {
		IncrementWiredPagesCount();
		oldCache->DecrementWiredPagesCount();
	}

	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}
/*!	Moves all pages from the given cache to this one.
	Both caches must be locked. This cache must be empty.
*/
void
VMCache::MoveAllPages(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(page_count == 0);

	std::swap(fromCache->pages, pages);
	page_count = fromCache->page_count;
	fromCache->page_count = 0;
	fWiredPagesCount = fromCache->fWiredPagesCount;
	fromCache->fWiredPagesCount = 0;

	// swap the VMCacheRefs
	mutex_lock(&sCacheListLock);
	std::swap(fCacheRef, fromCache->fCacheRef);
	fCacheRef->cache = this;
	fromCache->fCacheRef->cache = fromCache;
	mutex_unlock(&sCacheListLock);

#if VM_CACHE_TRACING >= 2
	for (VMCachePagesTree::Iterator it = pages.GetIterator();
			vm_page* page = it.Next();) {
		T2(RemovePage(fromCache, page));
		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
	}
#endif
}
/*!	Waits until one or more events happened for a given page which belongs to
	this cache.
	The cache must be locked. It will be unlocked by the method. \a relock
	specifies whether the method shall re-lock the cache before returning.
	\param page The page for which to wait.
	\param events The mask of events the caller is interested in.
	\param relock If \c true, the cache will be locked when returning,
		otherwise it won't be locked.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;

	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
		"cache page events");

	Unlock();
	thread_block();

	if (relock)
		Lock();
}
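

// Illustrative sketch (not part of the original source): the wait pattern
// used by other methods in this file. A caller that holds the cache lock and
// finds a busy page waits for it to become unbusy and then restarts its
// lookup; the surrounding loop is only hinted at here.
#if 0
	if (page->busy) {
		// WaitForPageEvents() unlocks the cache; passing true as "relock"
		// re-locks it before returning.
		cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
		// ... restart the operation, the page may be gone by now ...
	}
#endif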
/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	consumers.Add(consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}


/*!	Adds the \a area to this cache.
	Assumes you have the cache locked.
*/
status_t
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}
status_t
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


/*!	Transfers the areas from \a fromCache to this cache. This cache must not
	have areas yet. Both caches must be locked.
*/
void
VMCache::TransferAreas(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas == NULL);

	areas = fromCache->areas;
	fromCache->areas = NULL;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		area->cache = this;
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}
uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 count = 0;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		if (area != ignoreArea
			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
			count++;
		}
	}

	return count;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
		")\n", this, commitment));
	AssertLocked();

	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the cache...
	if (committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		//	enough for a commitment of that size?

		// try to commit more memory
		status = Commit(commitment, priority);
	}

	return status;
}
/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before they will be removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize, int priority)
{
	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
		B_PRIdOFF "\n", this, newSize, this->virtual_end));
	this->AssertLocked();

	T(Resize(this, newSize));

	status_t status = Commit(newSize - virtual_base, priority);
	if (status != B_OK)
		return status;

	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		for (VMCachePagesTree::Iterator it
					= pages.GetIterator(newPageCount, true, true);
				vm_page* page = it.Next();) {
			if (page->busy) {
				if (page->busy_writing) {
					// We cannot wait for the page to become available
					// as we might cause a deadlock this way
					page->busy_writing = false;
						// this will notify the writer to free the page
				} else {
					// wait for page to become unbusy
					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

					// restart from the start of the list
					it = pages.GetIterator(newPageCount, true, true);
				}
				continue;
			}

			// remove the page and put it into the free queue
			DEBUG_PAGE_ACCESS_START(page);
			vm_remove_all_page_mappings(page);
			ASSERT(page->WiredCount() == 0);
				// TODO: Find a real solution! If the page is wired
				// temporarily (e.g. by lock_memory()), we actually must not
				// unmap and free it here!
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through a IteratableSplayTree
				// removing the current node is safe.
		}
	}

	virtual_end = newSize;
	return B_OK;
}
/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->busy) {
				// wait for page to become unbusy
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->State() == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->IsMapped())
				return B_BUSY;

			DEBUG_PAGE_ACCESS_START(page);
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through a IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}
status_t
VMCache::Commit(off_t size, int priority)
{
	committed_size = size;
	return B_OK;
}


/*!	Returns whether the cache's underlying backing store could deliver the
	page at the given offset.

	Basically it returns whether a Read() at \a offset would at least read a
	partial page (assuming that no unexpected errors occur or the situation
	changes in the meantime).
*/
bool
VMCache::HasPage(off_t offset)
{
	// In accordance with Fault() the default implementation doesn't have a
	// backing store and doesn't allow faults.
	return false;
}


status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}
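

// Illustrative sketch (not part of the original source): the virtual
// functions above are the hooks a backing store overrides. The class below is
// hypothetical and only shows the shape of such an override; see VMVnodeCache
// or VMDeviceCache for real implementations.
#if 0
class ExampleZeroCache : public VMCache {
public:
	virtual bool HasPage(off_t offset)
	{
		// claim the store can deliver any page
		return true;
	}

	virtual status_t Read(off_t offset, const generic_io_vec *vecs,
		size_t count, uint32 flags, generic_size_t *_numBytes)
	{
		// a real store would fill the vecs here
		return B_OK;
	}

	virtual status_t Write(off_t offset, const generic_io_vec *vecs,
		size_t count, uint32 flags, generic_size_t *_numBytes)
	{
		// a real store would write the vecs back here
		return B_OK;
	}
};
#endif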
status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	generic_size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}
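

// Illustrative sketch (not part of the original source): a minimal
// AsyncIOCallback a caller could pass to WriteAsync(). The class name and the
// dprintf() are hypothetical; the IOFinished() signature mirrors the call
// made by the fallback above.
#if 0
struct ExampleWriteCallback : AsyncIOCallback {
	virtual void IOFinished(status_t status, bool partialTransfer,
		generic_size_t bytesTransferred)
	{
		// With the synchronous fallback above this runs before WriteAsync()
		// returns; real implementations may invoke it later from the I/O
		// completion path.
		dprintf("write finished: status %d, partial: %d\n", (int)status,
			partialTransfer ? 1 : 0);
	}
};
#endif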
/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through a
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			MovePage(page);
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Kernel debugger version of HasPage().
	Does not do any locking.
*/
bool
VMCache::DebugHasPage(off_t offset)
{
	// default that works for all subclasses that don't lock anyway
	return HasPage(offset);
}


/*!	Kernel debugger version of LookupPage().
	Does not do any locking.
*/
vm_page*
VMCache::DebugLookupPage(off_t offset)
{
	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
}
void
VMCache::Dump(bool showPages) const
{
	kprintf("CACHE %p:\n", this);
	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
	kprintf("  source:       %p\n", source);
	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
	kprintf("  lock:         %p\n", &fLock);
#if KDEBUG
	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
#endif
	kprintf("  areas:\n");

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
			area->Size());
		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
	}

	kprintf("  consumers:\n");
	for (ConsumerList::ConstIterator it = consumers.GetIterator();
			VMCache* consumer = it.Next();) {
		kprintf("\t%p\n", consumer);
	}

	kprintf("  pages:\n");
	if (showPages) {
		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (!vm_page_is_dummy(page)) {
				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
					" state %u (%s) wired_count %u\n", page,
					page->physical_page_number, page->cache_offset,
					page->State(), page_state_to_string(page->State()),
					page->WiredCount());
			} else {
				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
					page, page->State(), page_state_to_string(page->State()));
			}
		}
	} else
		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
}
/*!	Wakes up threads waiting for page events.
	\param page The page for which events occurred.
	\param events The mask of events that occurred.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from list and unblock
			*it = waiter->next;
			thread_unblock(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}
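

// Illustrative sketch (not part of the original source): the producer side of
// the waiter list above. Code that clears a page's busy flag while holding
// the cache lock is expected to signal waiters afterwards, roughly like this
// (the NotifyPageEvents() wrapper name is assumed from VMCache's public
// interface):
#if 0
	page->busy = false;
	cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
#endif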
/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	releases neither lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = consumers.RemoveHead();

	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		newSource->consumers.Remove(this);
		newSource->consumers.Add(consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();
}
/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	consumers.Remove(consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!
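

// Illustrative sketch (not part of the original source): how kernel code
// might obtain an anonymous cache from the factory below. The parameter
// values are hypothetical; on success the new cache is handed back with a
// single reference and is not locked.
#if 0
static status_t
example_create_anonymous_cache(VMCache*& _cache)
{
	return VMCacheFactory::CreateAnonymousCache(_cache,
		true,	// canOvercommit
		0,		// numPrecommittedPages
		0,		// numGuardPages
		true,	// swappable
		VM_PRIORITY_SYSTEM);
}
#endif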
/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
	int priority)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache
			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages, allocationFlags);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache
		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
			VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Vnode cache creation is never VIP.

	VMVnodeCache* cache
		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Device cache creation is never VIP.

	VMDeviceCache* cache
		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}
/*static*/ status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

	VMNullCache* cache
		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(allocationFlags);
	if (error != B_OK) {