haiku.git: headers/private/kernel/vm/VMCache.h
/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H

#include <debug.h>
#include <kernel.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;
struct ObjectCache;

enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

enum {
	PAGE_EVENT_NOT_BUSY = 0x01	// page not busy anymore
};

extern ObjectCache* gCacheRefObjectCache;
extern ObjectCache* gAnonymousCacheObjectCache;
extern ObjectCache* gAnonymousNoSwapCacheObjectCache;
extern ObjectCache* gVnodeCacheObjectCache;
extern ObjectCache* gDeviceCacheObjectCache;
extern ObjectCache* gNullCacheObjectCache;

struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
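
// The pages tree keys each vm_page by its cache_offset, which is a page index
// within the cache rather than a byte offset. A minimal lookup sketch,
// assuming IteratableSplayTree exposes a Lookup() by key (hypothetical
// accessor name):
//
//	vm_page* page = pages.Lookup(pageIndex);
//		// NULL when no page is cached at that index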

struct VMCache : public DoublyLinkedListLinkImpl<VMCache> {
public:
	typedef DoublyLinkedList<VMCache> ConsumerList;

public:
	VMCache();
	virtual ~VMCache();

	status_t Init(uint32 cacheType, uint32 allocationFlags);

	virtual void Delete();

	inline bool Lock();
	inline bool TryLock();
	inline bool SwitchLock(mutex* from);
	inline bool SwitchFromReadLock(rw_lock* from);
	void Unlock(bool consumerLocked = false);
	inline void AssertLocked();

	inline void AcquireRefLocked();
	inline void AcquireRef();
	inline void ReleaseRefLocked();
	inline void ReleaseRef();
	inline void ReleaseRefAndUnlock(bool consumerLocked = false);

	inline VMCacheRef* CacheRef() const { return fCacheRef; }

	void WaitForPageEvents(vm_page* page, uint32 events, bool relock);
	void NotifyPageEvents(vm_page* page, uint32 events)
	{
		if (fPageEventWaiters != NULL)
			_NotifyPageEvents(page, events);
	}
	inline void MarkPageUnbusy(vm_page* page);

	vm_page* LookupPage(off_t offset);
	void InsertPage(vm_page* page, off_t offset);
	void RemovePage(vm_page* page);
	void MovePage(vm_page* page);
	void MoveAllPages(VMCache* fromCache);

	inline page_num_t WiredPagesCount() const;
	inline void IncrementWiredPagesCount();
	inline void DecrementWiredPagesCount();

	virtual int32 GuardSize() { return 0; }

	void AddConsumer(VMCache* consumer);

	status_t InsertAreaLocked(VMArea* area);
	status_t RemoveArea(VMArea* area);
	void TransferAreas(VMCache* fromCache);
	uint32 CountWritableAreas(VMArea* ignoreArea) const;

	status_t WriteModified();
	status_t SetMinimalCommitment(off_t commitment, int priority);
	virtual status_t Resize(off_t newSize, int priority);

	status_t FlushAndRemoveAllPages();

	void* UserData() { return fUserData; }
	void SetUserData(void* data) { fUserData = data; }
		// Settable by the lock owner and valid as long as the lock is owned.

	// for debugging only
	int32 RefCount() const { return fRefCount; }

	// backing store operations
	virtual status_t Commit(off_t size, int priority);
	virtual bool HasPage(off_t offset);

	virtual status_t Read(off_t offset, const generic_io_vec* vecs,
		size_t count, uint32 flags, generic_size_t* _numBytes);
	virtual status_t Write(off_t offset, const generic_io_vec* vecs,
		size_t count, uint32 flags, generic_size_t* _numBytes);
	virtual status_t WriteAsync(off_t offset, const generic_io_vec* vecs,
		size_t count, generic_size_t numBytes, uint32 flags,
		AsyncIOCallback* callback);
	virtual bool CanWritePage(off_t offset);

	virtual int32 MaxPagesPerWrite() const { return -1; }
		// no restriction
	virtual int32 MaxPagesPerAsyncWrite() const { return -1; }
		// no restriction
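
	// A hedged sketch of calling the Read() hook: generic_io_vec carries a
	// (base, length) pair, and B_PHYSICAL_IO_REQUEST is assumed here to mark
	// the vectors as physical addresses; the buffer setup is illustrative:
	//
	//	generic_io_vec vec;
	//	vec.base = physicalAddress;		// assumed destination page
	//	vec.length = B_PAGE_SIZE;
	//	generic_size_t bytesRead = B_PAGE_SIZE;
	//	status_t status = cache->Read(offset, &vec, 1, B_PHYSICAL_IO_REQUEST,
	//		&bytesRead);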

	virtual status_t Fault(struct VMAddressSpace* aspace, off_t offset);

	virtual void Merge(VMCache* source);

	virtual status_t AcquireUnreferencedStoreRef();
	virtual void AcquireStoreRef();
	virtual void ReleaseStoreRef();

	virtual bool DebugHasPage(off_t offset);
	vm_page* DebugLookupPage(off_t offset);

	virtual void Dump(bool showPages) const;

protected:
	virtual void DeleteObject() = 0;

public:
	VMArea* areas;
	ConsumerList consumers;
		// list of caches that use this cache as a source
	VMCachePagesTree pages;
	VMCache* source;
	off_t virtual_base;
	off_t virtual_end;
	off_t committed_size;
		// TODO: Remove!
	uint32 page_count;
	uint32 temporary : 1;
	uint32 type : 6;

#if DEBUG_CACHE_LIST
	VMCache* debug_previous;
	VMCache* debug_next;
#endif

private:
	struct PageEventWaiter;
	friend struct VMCacheRef;

private:
	void _NotifyPageEvents(vm_page* page, uint32 events);

	inline bool _IsMergeable() const;

	void _MergeWithOnlyConsumer();
	void _RemoveConsumer(VMCache* consumer);

private:
	int32 fRefCount;
	mutex fLock;
	PageEventWaiter* fPageEventWaiters;
	void* fUserData;
	VMCacheRef* fCacheRef;
	page_num_t fWiredPagesCount;
};


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif

class VMCacheFactory {
public:
	static status_t CreateAnonymousCache(VMCache*& cache, bool canOvercommit,
		int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
		int priority);
	static status_t CreateVnodeCache(VMCache*& cache, struct vnode* vnode);
	static status_t CreateDeviceCache(VMCache*& cache, addr_t baseAddress);
	static status_t CreateNullCache(int priority, VMCache*& cache);
};
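
// A minimal creation sketch, assuming VM_PRIORITY_USER from the kernel VM
// headers and illustrative parameter values; the new cache is presumably
// returned with one reference held by the caller:
//
//	VMCache* cache;
//	status_t status = VMCacheFactory::CreateAnonymousCache(cache,
//		false /* canOvercommit */, 0 /* numPrecommittedPages */,
//		0 /* numGuardPages */, true /* swappable */, VM_PRIORITY_USER);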

bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}
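
// SwitchLock() and SwitchFromReadLock() hand over from a held lock to the
// cache lock in a single call, presumably so no wakeup is lost in between.
// A hedged sketch; the rw_lock and its use here are assumptions:
//
//	rw_lock_read_lock(&someLock);
//	// ...
//	cache->SwitchFromReadLock(&someLock);
//		// here: someLock released, cache lock held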

void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}
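
// A common pattern composing the primitives above, as a minimal sketch: take
// a reference so the cache stays alive, lock it to work on it, then drop both
// in one call:
//
//	cache->AcquireRef();
//	cache->Lock();
//	// ... operate on the cache ...
//	cache->ReleaseRefAndUnlock();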

void
VMCache::MarkPageUnbusy(vm_page* page)
{
	ASSERT(page->busy);
	page->busy = false;
	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}
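
// A hedged sketch of the busy-page protocol these notifications back: one
// thread marks a page busy while it does I/O on it (setting the `busy` flag
// under the cache lock is assumed here), and other threads wait for
// PAGE_EVENT_NOT_BUSY:
//
//	cache->Lock();
//	vm_page* page = cache->LookupPage(offset);
//	if (page != NULL && page->busy) {
//		// drops the cache lock while waiting; `true` relocks it afterwards
//		cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
//	}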

page_num_t
VMCache::WiredPagesCount() const
{
	return fWiredPagesCount;
}


void
VMCache::IncrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount < page_count);

	fWiredPagesCount++;
}


void
VMCache::DecrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount > 0);

	fWiredPagesCount--;
}

// vm_page methods implemented here to avoid VMCache.h inclusion in vm_types.h

inline void
vm_page::IncrementWiredCount()
{
	if (fWiredCount++ == 0)
		cache_ref->cache->IncrementWiredPagesCount();
}


inline void
vm_page::DecrementWiredCount()
{
	ASSERT(fWiredCount > 0);

	if (--fWiredCount == 0)
		cache_ref->cache->DecrementWiredPagesCount();
}
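
// Only the 0 -> 1 and 1 -> 0 transitions of a page's wired count reach the
// cache-wide counter, so fWiredPagesCount tracks how many pages are wired,
// not how often. A minimal sketch (holding the cache lock is assumed):
//
//	page->IncrementWiredCount();	// first wiring also bumps the cache count
//	page->DecrementWiredCount();	// last unwiring drops it again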

#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);
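
// A hedged usage sketch: the function presumably returns the page's cache
// locked and with a reference acquired, or NULL (for instance when `dontWait`
// is true and the lock is contended):
//
//	VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
//	if (cache != NULL) {
//		// cache is locked and referenced here
//		cache->ReleaseRefAndUnlock();
//	}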

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */