/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMAnonymousNoSwapCache.h"

#include <arch_config.h>
#include <KernelExport.h>
#include <slab/Slab.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.
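

// committed_size (inherited from VMCache) tracks how much memory has been
// reserved for this cache; the destructor below hands any remaining
// commitment back to the system.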
VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	vm_unreserve_memory(committed_size);
}


status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages, uint32 allocationFlags)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = %ld) "
		"at %p\n", canOvercommit ? "yes" : "no", numGuardPages, this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}
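

// Commit() adjusts the amount of memory reserved for this cache. For an
// overcommitting cache the bulk of the reservation is deferred to Fault();
// only up to fPrecommittedPages pages are reserved here up front, to make a
// later failure less likely.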
status_t
VMAnonymousNoSwapCache::Commit(off_t size, int priority)
{
	// If we can overcommit, we don't commit here, but in Fault(). We always
	// unreserve memory, if we're asked to shrink our commitment, though.
	if (fCanOvercommit && size > committed_size) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check to see how much we could commit - we need real memory

	if (size > committed_size) {
		if (vm_try_reserve_memory(size - committed_size, priority, 1000000)
				!= B_OK) {
			return B_NO_MEMORY;
		}
	} else {
		// we can release some
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}
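

// Anonymous memory without swap has no backing store: pages exist only in
// RAM, so HasPage() always answers false and Read() must never be called.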
bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	return false;
}


status_t
VMAnonymousNoSwapCache::Read(off_t offset, const generic_io_vec* vecs,
	size_t count, uint32 flags, generic_size_t* _numBytes)
{
	panic("anonymous_store: read called. Invalid!\n");
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Write(off_t offset, const generic_io_vec* vecs,
	size_t count, uint32 flags, generic_size_t* _numBytes)
{
	// no place to write, this will cause the page daemon to skip this store
	return B_ERROR;
}
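

// Fault() is where an overcommitting cache actually reserves memory, one
// page per fault as pages are first touched. It also turns accesses to a
// stack area's guard pages into errors instead of letting vm_soft_fault()
// map a page there.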
status_t
VMAnonymousNoSwapCache::Fault(struct VMAddressSpace* aspace, off_t offset)
{
	if (fGuardedSize > 0) {
		off_t guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
		guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
		guardOffset = virtual_size - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

		// report stack fault, guard page hit!
		if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
			TRACE(("stack overflow!\n"));
			return B_BAD_ADDRESS;
		}
	}

	if (fCanOvercommit) {
		if (fPrecommittedPages == 0) {
			// never commit more than needed
			if (committed_size / B_PAGE_SIZE > page_count)
				return B_BAD_HANDLER;

			// try to commit additional memory
			int priority = aspace == VMAddressSpace::Kernel()
				? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
			if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK) {
				dprintf("%p->VMAnonymousNoSwapCache::Fault(): Failed to "
					"reserve %d bytes of RAM.\n", this, (int)B_PAGE_SIZE);
				return B_NO_MEMORY;
			}

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault
	return B_BAD_HANDLER;
}
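

// When this cache consumes another anonymous cache, it takes over the
// source's committed size and releases whatever exceeds the merged cache's
// actual size.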
void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}
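

// Instances of this class live in a dedicated slab object cache;
// DeleteObject() returns this object to it.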
void
VMAnonymousNoSwapCache::DeleteObject()
{
	object_cache_delete(gAnonymousNoSwapCacheObjectCache, this);
}