/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/vm.h>
#include <kernel/vm_priv.h>
#include <kernel/vm_cache.h>
#include <kernel/vm_page.h>
#include <kernel/heap.h>
#include <kernel/int.h>
#include <kernel/khash.h>
#include <kernel/lock.h>
#include <kernel/debug.h>
#include <kernel/smp.h>
#include <kernel/arch/cpu.h>
#include <newos/errors.h>

/* hash table of pages keyed by cache they're in and offset */
#define PAGE_TABLE_SIZE 1024 /* make this dynamic */
static void *page_cache_table;
static spinlock_t page_cache_table_lock;

/* lookup key for the page cache hash table: a (cache_ref, offset) pair */
struct page_lookup_key {
	off_t offset;
	vm_cache_ref *ref;
};
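
// hash table compare callback: a page matches a lookup key when it belongs
// to the same cache_ref and sits at the same offset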
static int page_compare_func(void *_p, const void *_key)
{
	vm_page *p = _p;
	const struct page_lookup_key *key = _key;

//	dprintf("page_compare_func: p 0x%x, key 0x%x\n", p, key);

	if(p->cache_ref == key->ref && p->offset == key->offset)
		return 0;
	else
		return -1;
}

/* hash a (cache_ref, offset) pair; the offset is shifted down by the page size (4K) */
#define HASH(offset, ref) ((unsigned int)((offset) >> 12) ^ ((unsigned int)(ref) >> 4))
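
// hash table hash callback: hashes either an existing page (p != NULL) or a
// bare lookup key; both must produce the same value for the same pair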
static unsigned int page_hash_func(void *_p, const void *_key, unsigned int range)
{
	vm_page *p = _p;
	const struct page_lookup_key *key = _key;

#if 0
	// debug output, normally compiled out
	if(p)
		dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(p->offset, p->cache_ref) % range);
	else
		dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(key->offset, key->ref) % range);
#endif

	if(p)
		return HASH(p->offset, p->cache_ref) % range;
	else
		return HASH(key->offset, key->ref) % range;
}
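
// one-time setup: allocate the global page cache hash table and init its lock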
int vm_cache_init(kernel_args *ka)
{
	page_cache_table = hash_init(PAGE_TABLE_SIZE,
		offsetof(vm_page, hash_next),
		&page_compare_func, &page_hash_func);
	if(!page_cache_table)
		panic("vm_cache_init: cannot allocate memory for page cache hash table\n");
	page_cache_table_lock = 0;

	return 0;
}
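
// allocate and initialize a vm_cache wrapping the given backing store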
vm_cache *vm_cache_create(vm_store *store)
{
	vm_cache *cache;

	cache = kmalloc(sizeof(vm_cache));
	if(cache == NULL)
		return NULL;

	cache->magic = VM_CACHE_MAGIC;
	cache->page_list = NULL;
	cache->ref = NULL;
	cache->source = NULL;
	cache->store = store;
	if(store)
		store->cache = cache;
	cache->virtual_size = 0;
	cache->temporary = 0;
	cache->scan_skip = 0;

	return cache;
}
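
// create the vm_cache_ref that tracks the regions mapping a cache and
// carries its reference count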
vm_cache_ref *vm_cache_ref_create(vm_cache *cache)
{
	vm_cache_ref *ref;

	ref = kmalloc(sizeof(vm_cache_ref));
	if(ref == NULL)
		return NULL;

	ref->magic = VM_CACHE_REF_MAGIC;
	ref->cache = cache;
	mutex_init(&ref->lock, "cache_ref_mutex");
	ref->region_list = NULL;
	ref->ref_count = 0;
	cache->ref = ref;

	return ref;
}
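
// take a reference on the cache_ref, optionally forwarding the reference to
// the backing store if its ops provide an acquire_ref hook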
void vm_cache_acquire_ref(vm_cache_ref *cache_ref, bool acquire_store_ref)
{
//	dprintf("vm_cache_acquire_ref: cache_ref 0x%x, ref will be %d\n", cache_ref, cache_ref->ref_count+1);

	if(cache_ref == NULL)
		panic("vm_cache_acquire_ref: passed NULL\n");
	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);
	VERIFY_VM_STORE(cache_ref->cache->store);

	if(acquire_store_ref && cache_ref->cache->store->ops->acquire_ref) {
		cache_ref->cache->store->ops->acquire_ref(cache_ref->cache->store);
	}
	atomic_add(&cache_ref->ref_count, 1);
}
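
// drop a reference; when the last one goes away, destroy the backing store,
// free every cached page, release the source cache, and free the cache and
// cache_ref themselves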
void vm_cache_release_ref(vm_cache_ref *cache_ref)
{
	vm_page *page;

//	dprintf("vm_cache_release_ref: cache_ref 0x%x, ref will be %d\n", cache_ref, cache_ref->ref_count-1);

	if(cache_ref == NULL)
		panic("vm_cache_release_ref: passed NULL\n");
	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);

	if(atomic_add(&cache_ref->ref_count, -1) == 1) {
		// delete the cache's backing store, if it has one
		off_t store_committed_size = 0;
		if(cache_ref->cache->store) {
			VERIFY_VM_STORE(cache_ref->cache->store);
			store_committed_size = cache_ref->cache->store->committed_size;
			(*cache_ref->cache->store->ops->destroy)(cache_ref->cache->store);
		}

		// free all of the pages in the cache
		page = cache_ref->cache->page_list;
		while(page != NULL) {
			vm_page *old_page = page;

			VERIFY_VM_PAGE(page);

			page = page->cache_next;

			// remove it from the hash table
			int_disable_interrupts();
			acquire_spinlock(&page_cache_table_lock);

			hash_remove(page_cache_table, old_page);

			release_spinlock(&page_cache_table_lock);
			int_restore_interrupts();

//			dprintf("vm_cache_release_ref: freeing page 0x%x\n", old_page->ppn);
			vm_page_set_state(old_page, PAGE_STATE_FREE);
		}

		// give back the commitment charged to this cache beyond what its
		// store had committed
		vm_increase_max_commit(cache_ref->cache->virtual_size - store_committed_size);

		// remove the ref to the source
		if(cache_ref->cache->source)
			vm_cache_release_ref(cache_ref->cache->source->ref);

		mutex_destroy(&cache_ref->lock);
		kfree(cache_ref->cache);
		kfree(cache_ref);

		return;
	}
	// not the last reference; just forward the release to the store if it
	// tracks its own references
	if(cache_ref->cache->store->ops->release_ref) {
		cache_ref->cache->store->ops->release_ref(cache_ref->cache->store);
	}
}
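
// look up the page cached at 'offset' in this cache, or NULL; consults the
// global page hash under its spinlock with interrupts disabled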
vm_page *vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset)
{
	vm_page *page;
	struct page_lookup_key key;

	VERIFY_VM_CACHE_REF(cache_ref);

	key.offset = offset;
	key.ref = cache_ref;

	int_disable_interrupts();
	acquire_spinlock(&page_cache_table_lock);

	page = hash_lookup(page_cache_table, &key);

	release_spinlock(&page_cache_table_lock);
	int_restore_interrupts();

	return page;
}
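
// add a page to the cache at the given offset: link it at the head of the
// cache's page list and insert it into the global page hash; the caller is
// presumably expected to hold the cache_ref lock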
void vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
{
//	dprintf("vm_cache_insert_page: cache 0x%x, page 0x%x, offset 0x%x 0x%x\n", cache_ref, page, offset);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);
	VERIFY_VM_PAGE(page);

	page->offset = offset;

	// push the page onto the head of the cache's page list
	if(cache_ref->cache->page_list != NULL) {
		cache_ref->cache->page_list->cache_prev = page;
	}
	page->cache_next = cache_ref->cache->page_list;
	page->cache_prev = NULL;
	cache_ref->cache->page_list = page;

	page->cache_ref = cache_ref;

	int_disable_interrupts();
	acquire_spinlock(&page_cache_table_lock);

	hash_insert(page_cache_table, page);

	release_spinlock(&page_cache_table_lock);
	int_restore_interrupts();
}
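
// detach a page from its cache: pull it out of the global page hash, then
// unlink it from the cache's doubly linked page list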
void vm_cache_remove_page(vm_cache_ref *cache_ref, vm_page *page)
{
//	dprintf("vm_cache_remove_page: cache 0x%x, page 0x%x\n", cache_ref, page);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_CACHE(cache_ref->cache);
	VERIFY_VM_PAGE(page);

	int_disable_interrupts();
	acquire_spinlock(&page_cache_table_lock);

	hash_remove(page_cache_table, page);

	release_spinlock(&page_cache_table_lock);
	int_restore_interrupts();

	if(cache_ref->cache->page_list == page) {
		// page is at the head of the list
		if(page->cache_next != NULL)
			page->cache_next->cache_prev = NULL;
		cache_ref->cache->page_list = page->cache_next;
	} else {
		if(page->cache_prev != NULL)
			page->cache_prev->cache_next = page->cache_next;
		if(page->cache_next != NULL)
			page->cache_next->cache_prev = page->cache_prev;
	}
	page->cache_ref = NULL;
}
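
// link a region onto the cache_ref's region list, under the cache_ref lock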
int vm_cache_insert_region(vm_cache_ref *cache_ref, vm_region *region)
{
	mutex_lock(&cache_ref->lock);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_REGION(region);

	region->cache_next = cache_ref->region_list;
	if(region->cache_next)
		region->cache_next->cache_prev = region;
	region->cache_prev = NULL;
	cache_ref->region_list = region;

	mutex_unlock(&cache_ref->lock);

	return 0;
}
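
// unlink a region from the cache_ref's region list, under the cache_ref lock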
int vm_cache_remove_region(vm_cache_ref *cache_ref, vm_region *region)
{
	mutex_lock(&cache_ref->lock);

	VERIFY_VM_CACHE_REF(cache_ref);
	VERIFY_VM_REGION(region);

	if(region->cache_prev)
		region->cache_prev->cache_next = region->cache_next;
	if(region->cache_next)
		region->cache_next->cache_prev = region->cache_prev;
	if(cache_ref->region_list == region)
		cache_ref->region_list = region->cache_next;

	mutex_unlock(&cache_ref->lock);

	return 0;
}