/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
9 #ifndef _KERNEL_VM_VM_CACHE_H
10 #define _KERNEL_VM_VM_CACHE_H
15 #include <util/DoublyLinkedList.h>
17 #include <vm/vm_types.h>
19 #include "kernel_debug_config.h"
// Page event bits passed to WaitForPageEvents()/NotifyPageEvents().
// NOTE(review): extraction dropped the enum wrapper around this
// enumerator; restored as an anonymous enum so the value is usable again.
enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};
38 extern ObjectCache
* gCacheRefObjectCache
;
39 extern ObjectCache
* gAnonymousCacheObjectCache
;
40 extern ObjectCache
* gAnonymousNoSwapCacheObjectCache
;
41 extern ObjectCache
* gVnodeCacheObjectCache
;
42 extern ObjectCache
* gDeviceCacheObjectCache
;
43 extern ObjectCache
* gNullCacheObjectCache
;
46 struct VMCachePagesTreeDefinition
{
47 typedef page_num_t KeyType
;
48 typedef vm_page NodeType
;
50 static page_num_t
GetKey(const NodeType
* node
)
52 return node
->cache_offset
;
55 static SplayTreeLink
<NodeType
>* GetLink(NodeType
* node
)
57 return &node
->cache_link
;
60 static int Compare(page_num_t key
, const NodeType
* node
)
62 return key
== node
->cache_offset
? 0
63 : (key
< node
->cache_offset
? -1 : 1);
66 static NodeType
** GetListLink(NodeType
* node
)
68 return &node
->cache_next
;
72 typedef IteratableSplayTree
<VMCachePagesTreeDefinition
> VMCachePagesTree
;
// VMCache: base type for the kernel's VM caches.
// NOTE(review): this declaration is damaged by extraction — stray leading
// numbers are fused into the lines, and access specifiers, several member
// declarations, parameter-continuation lines and the closing brace appear
// to be missing. Code is left byte-identical; comments below only state
// what the visible fragments establish.
75 struct VMCache
: public DoublyLinkedListLinkImpl
<VMCache
> {
77 typedef DoublyLinkedList
<VMCache
> ConsumerList
;
// Second-phase initialization; returns a status_t.
83 status_t
Init(uint32 cacheType
, uint32 allocationFlags
);
85 virtual void Delete();
// Locking primitives around the cache's mutex (fLock, see the inline
// implementations further down).
88 inline bool TryLock();
89 inline bool SwitchLock(mutex
* from
);
90 inline bool SwitchFromReadLock(rw_lock
* from
);
91 void Unlock(bool consumerLocked
= false);
92 inline void AssertLocked();
// Reference counting; the *Locked variants assert fLock is held.
94 inline void AcquireRefLocked();
95 inline void AcquireRef();
96 inline void ReleaseRefLocked();
97 inline void ReleaseRef();
98 inline void ReleaseRefAndUnlock(
99 bool consumerLocked
= false);
101 inline VMCacheRef
* CacheRef() const { return fCacheRef
; }
// Page-event waiting/notification (events: see PAGE_EVENT_NOT_BUSY).
// NOTE(review): WaitForPageEvents' parameter list is truncated here —
// at least one trailing parameter is missing from the extraction.
103 void WaitForPageEvents(vm_page
* page
, uint32 events
,
// Fast path: only calls the out-of-line helper when waiters exist.
105 void NotifyPageEvents(vm_page
* page
, uint32 events
)
106 { if (fPageEventWaiters
!= NULL
)
107 _NotifyPageEvents(page
, events
); }
108 inline void MarkPageUnbusy(vm_page
* page
);
// Page management within this cache.
110 vm_page
* LookupPage(off_t offset
);
111 void InsertPage(vm_page
* page
, off_t offset
);
112 void RemovePage(vm_page
* page
);
113 void MovePage(vm_page
* page
);
114 void MoveAllPages(VMCache
* fromCache
);
// Wired-page accounting (implemented inline below).
116 inline page_num_t
WiredPagesCount() const;
117 inline void IncrementWiredPagesCount();
118 inline void DecrementWiredPagesCount();
// Default: no guard pages; subclasses may override.
120 virtual int32
GuardSize() { return 0; }
// Consumer and area management.
122 void AddConsumer(VMCache
* consumer
);
124 status_t
InsertAreaLocked(VMArea
* area
);
125 status_t
RemoveArea(VMArea
* area
);
126 void TransferAreas(VMCache
* fromCache
);
127 uint32
CountWritableAreas(VMArea
* ignoreArea
) const;
129 status_t
WriteModified();
// NOTE(review): SetMinimalCommitment's parameter list is truncated here.
130 status_t
SetMinimalCommitment(off_t commitment
,
132 virtual status_t
Resize(off_t newSize
, int priority
);
134 status_t
FlushAndRemoveAllPages();
// Opaque per-cache user-data slot.
136 void* UserData() { return fUserData
; }
137 void SetUserData(void* data
) { fUserData
= data
; }
138 // Settable by the lock owner and valid as
139 // long as the lock is owned.
141 // for debugging only
142 int32
RefCount() const
143 { return fRefCount
; }
145 // backing store operations
146 virtual status_t
Commit(off_t size
, int priority
);
147 virtual bool HasPage(off_t offset
);
// Vectored read/write against the backing store; _numBytes is in/out.
149 virtual status_t
Read(off_t offset
, const generic_io_vec
*vecs
,
150 size_t count
, uint32 flags
,
151 generic_size_t
*_numBytes
);
152 virtual status_t
Write(off_t offset
, const generic_io_vec
*vecs
,
153 size_t count
, uint32 flags
,
154 generic_size_t
*_numBytes
);
155 virtual status_t
WriteAsync(off_t offset
,
156 const generic_io_vec
* vecs
, size_t count
,
157 generic_size_t numBytes
, uint32 flags
,
158 AsyncIOCallback
* callback
);
159 virtual bool CanWritePage(off_t offset
);
161 virtual int32
MaxPagesPerWrite() const
162 { return -1; } // no restriction
163 virtual int32
MaxPagesPerAsyncWrite() const
164 { return -1; } // no restriction
// NOTE(review): Fault's parameter list is truncated here.
166 virtual status_t
Fault(struct VMAddressSpace
*aspace
,
169 virtual void Merge(VMCache
* source
);
171 virtual status_t
AcquireUnreferencedStoreRef();
172 virtual void AcquireStoreRef();
173 virtual void ReleaseStoreRef();
// Debugger-only lookups.
175 virtual bool DebugHasPage(off_t offset
);
176 vm_page
* DebugLookupPage(off_t offset
);
178 virtual void Dump(bool showPages
) const;
181 virtual void DeleteObject() = 0;
// Data members. NOTE(review): some members visible in neighbouring
// fragments (e.g. fLock, fRefCount, fUserData, page_count) are used above
// but their declarations are missing from this extraction.
185 ConsumerList consumers
;
186 // list of caches that use this cache as a source
187 VMCachePagesTree pages
;
191 off_t committed_size
;
194 uint32 temporary
: 1;
198 VMCache
* debug_previous
;
203 struct PageEventWaiter
;
204 friend struct VMCacheRef
;
// Private helpers.
207 void _NotifyPageEvents(vm_page
* page
, uint32 events
);
209 inline bool _IsMergeable() const;
211 void _MergeWithOnlyConsumer();
212 void _RemoveConsumer(VMCache
* consumer
);
217 PageEventWaiter
* fPageEventWaiters
;
219 VMCacheRef
* fCacheRef
;
220 page_num_t fWiredPagesCount
;
225 extern VMCache
* gDebugCacheList
;
// Factory for the concrete VMCache implementations.
// NOTE(review): extraction damage — the access specifier, some
// parameter-continuation lines (of CreateAnonymousCache and
// CreateDeviceCache) and the closing brace appear to be missing.
// Code left byte-identical.
229 class VMCacheFactory
{
// Creates an anonymous (swap- or non-swap-backed) cache.
// NOTE(review): trailing parameters are truncated here.
231 static status_t
CreateAnonymousCache(VMCache
*& cache
,
232 bool canOvercommit
, int32 numPrecommittedPages
,
233 int32 numGuardPages
, bool swappable
,
// Creates a cache backed by the given vnode.
235 static status_t
CreateVnodeCache(VMCache
*& cache
,
236 struct vnode
* vnode
);
// NOTE(review): CreateDeviceCache's parameter list is truncated here.
237 static status_t
CreateDeviceCache(VMCache
*& cache
,
239 static status_t
CreateNullCache(int priority
, VMCache
*& cache
);
// Inline implementations of the VMCache locking/ref-count/wired-count
// methods, plus vm_page wired-count methods and C-style declarations.
// NOTE(review): extraction damage — several signature lines, braces and
// whole statements are missing below (e.g. the Lock()/TryLock() headers,
// the AcquireRef/ReleaseRef bodies). Code left byte-identical.
// Body of what appears to be VMCache::Lock(): acquires fLock.
247 return mutex_lock(&fLock
) == B_OK
;
// Body of VMCache::TryLock(): non-blocking acquire of fLock.
254 return mutex_trylock(&fLock
) == B_OK
;
// Atomically swaps ownership from another mutex to fLock.
259 VMCache::SwitchLock(mutex
* from
)
261 return mutex_switch_lock(from
, &fLock
) == B_OK
;
// Atomically swaps from a read lock to fLock.
266 VMCache::SwitchFromReadLock(rw_lock
* from
)
268 return mutex_switch_from_read_lock(from
, &fLock
) == B_OK
;
273 VMCache::AssertLocked()
275 ASSERT_LOCKED_MUTEX(&fLock
);
// Requires fLock held; remainder of the body is missing here.
280 VMCache::AcquireRefLocked()
282 ASSERT_LOCKED_MUTEX(&fLock
);
// NOTE(review): bodies of AcquireRef()/ReleaseRef() are missing.
289 VMCache::AcquireRef()
298 VMCache::ReleaseRefLocked()
300 ASSERT_LOCKED_MUTEX(&fLock
);
307 VMCache::ReleaseRef()
// Drops a reference and unlocks; consumerLocked is forwarded to Unlock().
316 VMCache::ReleaseRefAndUnlock(bool consumerLocked
)
319 Unlock(consumerLocked
);
// Notifies waiters that the page is no longer busy.
324 VMCache::MarkPageUnbusy(vm_page
* page
)
328 NotifyPageEvents(page
, PAGE_EVENT_NOT_BUSY
);
333 VMCache::WiredPagesCount() const
335 return fWiredPagesCount
;
// NOTE(review): asserts against a page_count member whose declaration is
// not visible in this extraction.
340 VMCache::IncrementWiredPagesCount()
342 ASSERT(fWiredPagesCount
< page_count
);
349 VMCache::DecrementWiredPagesCount()
351 ASSERT(fWiredPagesCount
> 0);
357 // vm_page methods implemented here to avoid VMCache.h inclusion in vm_types.h
// First wiring of a page also bumps the owning cache's wired count.
360 vm_page::IncrementWiredCount()
362 if (fWiredCount
++ == 0)
363 cache_ref
->cache
->IncrementWiredPagesCount();
// Last unwiring of a page also drops the owning cache's wired count.
368 vm_page::DecrementWiredCount()
370 ASSERT(fWiredCount
> 0);
372 if (--fWiredCount
== 0)
373 cache_ref
->cache
->DecrementWiredPagesCount();
// C-style entry points of the cache subsystem.
381 status_t
vm_cache_init(struct kernel_args
*args
);
382 void vm_cache_init_post_heap();
// NOTE(review): this declaration's parameter list is truncated here.
383 struct VMCache
*vm_cache_acquire_locked_page_cache(struct vm_page
*page
,
391 #endif /* _KERNEL_VM_VM_CACHE_H */