/*
** kernel/vm/vm_cache.c (newos.git)
**
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/vm.h>
#include <kernel/vm_priv.h>
#include <kernel/vm_cache.h>
#include <kernel/vm_page.h>
#include <kernel/heap.h>
#include <kernel/int.h>
#include <kernel/khash.h>
#include <kernel/lock.h>
#include <kernel/debug.h>
#include <kernel/smp.h>
#include <kernel/arch/cpu.h>
#include <newos/errors.h>
/* hash table of pages keyed by cache they're in and offset */
#define PAGE_TABLE_SIZE 1024 /* make this dynamic */
static void *page_cache_table;
static spinlock_t page_cache_table_lock;

struct page_lookup_key {
	off_t offset;
	vm_cache_ref *ref;
};
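
// Hash-table compare callback: matches a page against a (cache_ref, offset)
// lookup key. Returns 0 when the page belongs to the keyed cache at the keyed
// offset, nonzero otherwise.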
static int page_compare_func(void *_p, const void *_key)
{
	vm_page *p = _p;
	const struct page_lookup_key *key = _key;

//	dprintf("page_compare_func: p 0x%x, key 0x%x\n", p, key);

#if DEBUG > 1
	VERIFY_VM_PAGE(p);
#endif

	if(p->cache_ref == key->ref && p->offset == key->offset)
		return 0;
	else
		return -1;
}
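
// Hash function: mixes the page offset (>> 12 discards the sub-page bits,
// assuming 4 KB pages) with the owning vm_cache_ref pointer. When the hash
// table passes an element, hash its own fields; otherwise hash the lookup key.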
#define HASH(offset, ref) ((unsigned int)(offset >> 12) ^ ((unsigned int)(ref)>>4))

static unsigned int page_hash_func(void *_p, const void *_key, unsigned int range)
{
	vm_page *p = _p;
	const struct page_lookup_key *key = _key;
#if 0
	if(p)
		dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(p->offset, p->cache_ref) % range);
	else
		dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(key->offset, key->ref) % range);
#endif

#if DEBUG > 1
	if(p != NULL)
		VERIFY_VM_PAGE(p);
#endif

	if(p)
		return HASH(p->offset, p->cache_ref) % range;
	else
		return HASH(key->offset, key->ref) % range;
}
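
// One-time setup: allocate the global page cache hash table and zero its
// spinlock. The kernel_args parameter is currently unused.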
int vm_cache_init(kernel_args *ka)
{
	page_cache_table = hash_init(PAGE_TABLE_SIZE,
		offsetof(vm_page, hash_next),
		&page_compare_func,
		&page_hash_func);
	if(!page_cache_table)
		panic("vm_cache_init: cannot allocate memory for page cache hash table\n");
	page_cache_table_lock = 0;

	return 0;
}
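
// Allocate a vm_cache backed by the given store (which may be NULL) and link
// the store back to its new cache. Returns NULL on allocation failure.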
vm_cache *vm_cache_create(vm_store *store)
{
	vm_cache *cache;

	cache = kmalloc(sizeof(vm_cache));
	if(cache == NULL)
		return NULL;

	cache->magic = VM_CACHE_MAGIC;
	cache->page_list = NULL;
	cache->ref = NULL;
	cache->source = NULL;
	cache->store = store;
	if(store != NULL)
		store->cache = cache;
	cache->virtual_size = 0;
	cache->temporary = 0;
	cache->scan_skip = 0;

	return cache;
}
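
// Allocate the vm_cache_ref that fronts a cache: it carries the mutex, the
// region list, and the reference count (starting at 0) for the cache.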
vm_cache_ref *vm_cache_ref_create(vm_cache *cache)
{
	vm_cache_ref *ref;

	ref = kmalloc(sizeof(vm_cache_ref));
	if(ref == NULL)
		return NULL;

	ref->magic = VM_CACHE_REF_MAGIC;
	ref->cache = cache;
	mutex_init(&ref->lock, "cache_ref_mutex");
	ref->region_list = NULL;
	ref->ref_count = 0;
	cache->ref = ref;

	return ref;
}
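
// Take a reference on the cache_ref; optionally forward the reference to the
// backing store as well, if the store implements acquire_ref.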
void vm_cache_acquire_ref(vm_cache_ref *cache_ref, bool acquire_store_ref)
{
//	dprintf("vm_cache_acquire_ref: cache_ref 0x%x, ref will be %d\n", cache_ref, cache_ref->ref_count+1);

	if(cache_ref == NULL)
		panic("vm_cache_acquire_ref: passed NULL\n");
	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);
	VERIFY_VM_STORE(cache_ref->cache->store);

	if(acquire_store_ref && cache_ref->cache->store->ops->acquire_ref) {
		cache_ref->cache->store->ops->acquire_ref(cache_ref->cache->store);
	}
	atomic_add(&cache_ref->ref_count, 1);
}
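
// Drop a reference. On the last release: destroy the backing store, free every
// page in the cache (unhashing each under the page_cache_table spinlock),
// return the committed-size delta to the global commit pool, release the
// source cache, and free the cache and ref themselves. Otherwise just forward
// the release to the store.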
void vm_cache_release_ref(vm_cache_ref *cache_ref)
{
	vm_page *page;

//	dprintf("vm_cache_release_ref: cache_ref 0x%x, ref will be %d\n", cache_ref, cache_ref->ref_count-1);

	if(cache_ref == NULL)
		panic("vm_cache_release_ref: passed NULL\n");
	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);

	if(atomic_add(&cache_ref->ref_count, -1) == 1) {
		// delete this cache
		// delete the cache's backing store, if it has one
		off_t store_committed_size = 0;
		if(cache_ref->cache->store) {
			VERIFY_VM_STORE(cache_ref->cache->store);
			store_committed_size = cache_ref->cache->store->committed_size;
			(*cache_ref->cache->store->ops->destroy)(cache_ref->cache->store);
		}

		// free all of the pages in the cache
		page = cache_ref->cache->page_list;
		while(page) {
			vm_page *old_page = page;

			VERIFY_VM_PAGE(page);

			page = page->cache_next;

			// remove it from the hash table
			int_disable_interrupts();
			acquire_spinlock(&page_cache_table_lock);

			hash_remove(page_cache_table, old_page);

			release_spinlock(&page_cache_table_lock);
			int_restore_interrupts();

//			dprintf("vm_cache_release_ref: freeing page 0x%x\n", old_page->ppn);
			vm_page_set_state(old_page, PAGE_STATE_FREE);
		}
		vm_increase_max_commit(cache_ref->cache->virtual_size - store_committed_size);

		// remove the ref to the source
		if(cache_ref->cache->source)
			vm_cache_release_ref(cache_ref->cache->source->ref);

		mutex_destroy(&cache_ref->lock);
		kfree(cache_ref->cache);
		kfree(cache_ref);

		return;
	}

	// the store may be NULL (see vm_cache_create), so check before chasing ops
	if(cache_ref->cache->store && cache_ref->cache->store->ops->release_ref) {
		cache_ref->cache->store->ops->release_ref(cache_ref->cache->store);
	}
}
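
// Look up the page cached at 'offset' in this cache, or NULL if not resident.
// The hash probe runs with interrupts disabled under the table spinlock.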
vm_page *vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset)
{
	vm_page *page;
	struct page_lookup_key key;

	VERIFY_VM_CACHE_REF(cache_ref);

	key.offset = offset;
	key.ref = cache_ref;

	int_disable_interrupts();
	acquire_spinlock(&page_cache_table_lock);

	page = hash_lookup(page_cache_table, &key);

	release_spinlock(&page_cache_table_lock);
	int_restore_interrupts();

	return page;
}
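
// Stamp the page with its offset and owning cache_ref, push it on the front of
// the cache's doubly linked page list, then add it to the global hash table.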
void vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
{
//	dprintf("vm_cache_insert_page: cache 0x%x, page 0x%x, offset 0x%x 0x%x\n", cache_ref, page, offset);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);
	VERIFY_VM_PAGE(page);

	page->offset = offset;

	if(cache_ref->cache->page_list != NULL) {
		cache_ref->cache->page_list->cache_prev = page;
	}
	page->cache_next = cache_ref->cache->page_list;
	page->cache_prev = NULL;
	cache_ref->cache->page_list = page;

	page->cache_ref = cache_ref;

	int_disable_interrupts();
	acquire_spinlock(&page_cache_table_lock);

	hash_insert(page_cache_table, page);

	release_spinlock(&page_cache_table_lock);
	int_restore_interrupts();
}
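
// Reverse of vm_cache_insert_page: pull the page out of the hash table first,
// then unlink it from the cache's page list and clear its back pointer.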
void vm_cache_remove_page(vm_cache_ref *cache_ref, vm_page *page)
{
//	dprintf("vm_cache_remove_page: cache 0x%x, page 0x%x\n", cache_ref, page);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);
	VERIFY_VM_PAGE(page);

	int_disable_interrupts();
	acquire_spinlock(&page_cache_table_lock);

	hash_remove(page_cache_table, page);

	release_spinlock(&page_cache_table_lock);
	int_restore_interrupts();

	if(cache_ref->cache->page_list == page) {
		if(page->cache_next != NULL)
			page->cache_next->cache_prev = NULL;
		cache_ref->cache->page_list = page->cache_next;
	} else {
		if(page->cache_prev != NULL)
			page->cache_prev->cache_next = page->cache_next;
		if(page->cache_next != NULL)
			page->cache_next->cache_prev = page->cache_prev;
	}
	page->cache_ref = NULL;
}
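
// Attach a region to this cache_ref's region list (front insertion), holding
// the ref's mutex across the list manipulation.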
int vm_cache_insert_region(vm_cache_ref *cache_ref, vm_region *region)
{
	mutex_lock(&cache_ref->lock);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_REGION(region);

	region->cache_next = cache_ref->region_list;
	if(region->cache_next)
		region->cache_next->cache_prev = region;
	region->cache_prev = NULL;
	cache_ref->region_list = region;

	mutex_unlock(&cache_ref->lock);
	return 0;
}
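
// Detach a region from the cache_ref's region list under the ref's mutex.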
int vm_cache_remove_region(vm_cache_ref *cache_ref, vm_region *region)
{
	mutex_lock(&cache_ref->lock);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_REGION(region);

	if(region->cache_prev)
		region->cache_prev->cache_next = region->cache_next;
	if(region->cache_next)
		region->cache_next->cache_prev = region->cache_prev;
	if(cache_ref->region_list == region)
		cache_ref->region_list = region->cache_next;

	mutex_unlock(&cache_ref->lock);
	return 0;
}
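
/*
** Usage sketch (hypothetical caller, not part of this file): a typical
** anonymous-memory setup would create a store, wrap it in a cache and a
** cache_ref, and take one reference per region that maps it. The store
** constructor named below is an assumption for illustration only.
**
**	vm_store *store = vm_store_create_anonymous_noswap(); // assumed ctor
**	vm_cache *cache = vm_cache_create(store);
**	vm_cache_ref *ref = vm_cache_ref_create(cache);
**	vm_cache_acquire_ref(ref, true);   // one ref per mapping region
**	...
**	vm_cache_release_ref(ref);         // last release tears everything down
*/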