Drop main() prototype. Syncs with NetBSD-8
[minix.git] / minix / servers / vm / cache.c
blobecd7529eedc1709ba8b103810b178f94b637d16d
2 /* File that implements the data structure, insert, lookup and remove
3 * functions for file system cache blocks.
5 * Cache blocks can be mapped into the memory of processes by the
6 * 'cache' and 'file' memory types.
7 */
9 #include <assert.h>
10 #include <string.h>
12 #include <minix/hash.h>
14 #include "proto.h"
15 #include "vm.h"
16 #include "region.h"
17 #include "glo.h"
18 #include "cache.h"
/* cache datastructure */

/* Number of buckets in each of the two hash tables below. */
#define HASHSIZE 65536

/* Cached pages hashed by (device, device offset). Every cached page is
 * present in this table.
 */
static struct cached_page *cache_hash_bydev[HASHSIZE];
/* Cached pages hashed by (inode, inode offset). Only pages with a known
 * inode association (ino != VMC_NO_INODE) appear here.
 */
static struct cached_page *cache_hash_byino[HASHSIZE];
/* Doubly-linked LRU list of all cached pages, from oldest to newest. */
static struct cached_page *lru_oldest = NULL, *lru_newest = NULL;

/* Current number of cached pages; equals the length of the LRU list. */
static u32_t cached_pages = 0;
29 static void lru_rm(struct cached_page *hb)
31 struct cached_page *newer = hb->newer, *older = hb->older;
32 assert(lru_newest);
33 assert(lru_oldest);
34 if(newer) {
35 assert(newer->older == hb);
36 newer->older = older;
38 if(older) {
39 assert(older->newer == hb);
40 older->newer = newer;
43 if(lru_newest == hb) { assert(!newer); lru_newest = older; }
44 if(lru_oldest == hb) { assert(!older); lru_oldest = newer; }
46 if(lru_newest) assert(lru_newest->newer == NULL);
47 if(lru_oldest) assert(lru_oldest->older == NULL);
49 cached_pages--;
52 static void lru_add(struct cached_page *hb)
54 if(lru_newest) {
55 assert(lru_oldest);
56 assert(!lru_newest->newer);
57 lru_newest->newer = hb;
58 } else {
59 assert(!lru_oldest);
60 lru_oldest = hb;
63 hb->older = lru_newest;
64 hb->newer = NULL;
65 lru_newest = hb;
67 cached_pages++;
/*
 * Mark 'hb' as most recently used: unlink it from its current LRU
 * position and re-append it at the newest end.
 */
void cache_lru_touch(struct cached_page *hb)
{
	lru_rm(hb);
	lru_add(hb);
}
76 static __inline u32_t makehash(u32_t p1, u64_t p2)
78 u32_t offlo = ex64lo(p2), offhi = ex64hi(p2),
79 v = 0x12345678;
80 hash_mix(p1, offlo, offhi);
81 hash_final(offlo, offhi, v);
83 return v % HASHSIZE;
#if CACHE_SANITY
/*
 * Verify the internal consistency of the cache data structures:
 * - every by-device entry is in the right bucket and findable;
 * - every by-inode entry has a valid inode and is in the right bucket;
 * - the number of by-inode entries matches the number of by-device
 *   entries that carry an inode;
 * - the LRU list is a well-formed doubly-linked list whose length
 *   matches both the by-device entry count and 'cached_pages'.
 * Panics (via assert) on any inconsistency.
 */
void cache_sanitycheck_internal(void)
{
	int h;
	int n = 0;
	int byino = 0;
	int withino = 0;
	int bydev_total = 0, lru_total = 0;
	struct cached_page *cp;

	for(h = 0; h < HASHSIZE; h++) {
		for(cp = cache_hash_bydev[h]; cp; cp = cp->hash_next_dev) {
			assert(cp->dev != NO_DEV);
			assert(h == makehash(cp->dev, cp->dev_offset));
			/* touchlru=0: a sanity check must not reorder the
			 * LRU. (The previous call passed only four arguments
			 * and would not compile with CACHE_SANITY enabled.)
			 */
			assert(cp == find_cached_page_bydev(cp->dev,
				cp->dev_offset, cp->ino, cp->ino_offset, 0));
			if(cp->ino != VMC_NO_INODE) withino++;
			bydev_total++;
			n++;
			assert(n < 1500000);	/* guard against chain cycles */
		}
		for(cp = cache_hash_byino[h]; cp; cp = cp->hash_next_ino) {
			assert(cp->dev != NO_DEV);
			assert(cp->ino != VMC_NO_INODE);
			assert(h == makehash(cp->ino, cp->ino_offset));
			byino++;
			n++;
			assert(n < 1500000);	/* guard against chain cycles */
		}
	}

	assert(byino == withino);

	if(lru_newest) {
		assert(lru_oldest);
		assert(!lru_newest->newer);
		assert(!lru_oldest->older);
	} else {
		assert(!lru_oldest);
	}

	for(cp = lru_oldest; cp; cp = cp->newer) {
		struct cached_page *newer = cp->newer,
			*older = cp->older;
		if(newer) assert(newer->older == cp);
		if(older) assert(older->newer == cp);
		lru_total++;
	}

	assert(lru_total == bydev_total);
	assert(lru_total == cached_pages);
}
#endif
/*
 * Generate a static function 'fname' that unlinks 'cp' from the
 * singly-linked hash chain rooted at '*head', following 'nextfield'
 * links. 'cp' must be present in the chain (asserted).
 */
#define rmhash_f(fname, nextfield) \
static void \
fname(struct cached_page *cp, struct cached_page **head) \
{ \
	struct cached_page *hb; \
	if(*head == cp) { *head = cp->nextfield; return; } \
	for(hb = *head; hb && cp != hb->nextfield; hb = hb->nextfield) ; \
	assert(hb); assert(hb->nextfield == cp); \
	hb->nextfield = cp->nextfield; \
	return; \
}

/* Chain removal for the by-inode and by-device hash tables. */
rmhash_f(rmhash_byino, hash_next_ino)
rmhash_f(rmhash_bydev, hash_next_dev)
155 static void addcache_byino(struct cached_page *hb)
157 int hv_ino = makehash(hb->ino, hb->ino_offset);
158 assert(hb->ino != VMC_NO_INODE);
159 hb->hash_next_ino = cache_hash_byino[hv_ino];
160 cache_hash_byino[hv_ino] = hb;
163 static void
164 update_inohash(struct cached_page *hb, ino_t ino, u64_t ino_off)
166 assert(ino != VMC_NO_INODE);
167 if(hb->ino != VMC_NO_INODE) {
168 int h = makehash(hb->ino, hb->ino_offset);
169 rmhash_byino(hb, &cache_hash_byino[h]);
171 hb->ino = ino;
172 hb->ino_offset = ino_off;
173 addcache_byino(hb);
176 struct cached_page *
177 find_cached_page_bydev(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off, int touchlru)
179 struct cached_page *hb;
181 for(hb = cache_hash_bydev[makehash(dev, dev_off)]; hb; hb=hb->hash_next_dev) {
182 if(hb->dev == dev && hb->dev_offset == dev_off) {
183 if(ino != VMC_NO_INODE) {
184 if(hb->ino != ino || hb->ino_offset != ino_off) {
185 update_inohash(hb, ino, ino_off);
189 if(touchlru) cache_lru_touch(hb);
191 return hb;
195 return NULL;
198 struct cached_page *find_cached_page_byino(dev_t dev, ino_t ino, u64_t ino_off, int touchlru)
200 struct cached_page *hb;
202 assert(ino != VMC_NO_INODE);
203 assert(dev != NO_DEV);
205 for(hb = cache_hash_byino[makehash(ino, ino_off)]; hb; hb=hb->hash_next_ino) {
206 if(hb->dev == dev && hb->ino == ino && hb->ino_offset == ino_off) {
207 if(touchlru) cache_lru_touch(hb);
209 return hb;
213 return NULL;
216 int addcache(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off, int flags,
217 struct phys_block *pb)
219 int hv_dev;
220 struct cached_page *hb;
222 if(pb->flags & PBF_INCACHE) {
223 printf("VM: already in cache\n");
224 return EINVAL;
227 if(!SLABALLOC(hb)) {
228 printf("VM: no memory for cache node\n");
229 return ENOMEM;
232 assert(dev != NO_DEV);
233 #if CACHE_SANITY
234 assert(!find_cached_page_bydev(dev, dev_off, ino, ino_off));
235 #endif
237 hb->dev = dev;
238 hb->dev_offset = dev_off;
239 hb->ino = ino;
240 hb->ino_offset = ino_off;
241 hb->flags = flags & VMSF_ONCE;
242 hb->page = pb;
243 hb->page->refcount++; /* block also referenced by cache now */
244 hb->page->flags |= PBF_INCACHE;
246 hv_dev = makehash(dev, dev_off);
248 hb->hash_next_dev = cache_hash_bydev[hv_dev];
249 cache_hash_bydev[hv_dev] = hb;
251 if(hb->ino != VMC_NO_INODE)
252 addcache_byino(hb);
254 lru_add(hb);
256 return OK;
259 void rmcache(struct cached_page *cp)
261 struct phys_block *pb = cp->page;
262 int hv_dev = makehash(cp->dev, cp->dev_offset);
264 assert(cp->page->flags & PBF_INCACHE);
266 cp->page->flags &= ~PBF_INCACHE;
268 rmhash_bydev(cp, &cache_hash_bydev[hv_dev]);
269 if(cp->ino != VMC_NO_INODE) {
270 int hv_ino = makehash(cp->ino, cp->ino_offset);
271 rmhash_byino(cp, &cache_hash_byino[hv_ino]);
274 assert(cp->page->refcount >= 1);
275 cp->page->refcount--;
277 lru_rm(cp);
279 if(pb->refcount == 0) {
280 assert(pb->phys != MAP_NONE);
281 free_mem(ABS2CLICK(pb->phys), 1);
282 SLABFREE(pb);
285 SLABFREE(cp);
288 int cache_freepages(int pages)
290 struct cached_page *cp, *newercp;
291 int freed = 0;
292 int oldsteps = 0;
293 int skips = 0;
295 for(cp = lru_oldest; cp && freed < pages; cp = newercp) {
296 newercp = cp->newer;
297 assert(cp->page->refcount >= 1);
298 if(cp->page->refcount == 1) {
299 rmcache(cp);
300 freed++;
301 skips = 0;
302 } else skips++;
303 oldsteps++;
306 return freed;
310 * Remove all pages that are associated with the given device.
312 void
313 clear_cache_bydev(dev_t dev)
315 struct cached_page *cp, *ncp;
316 int h;
318 for (h = 0; h < HASHSIZE; h++) {
319 for (cp = cache_hash_bydev[h]; cp != NULL; cp = ncp) {
320 ncp = cp->hash_next_dev;
322 if (cp->dev == dev)
323 rmcache(cp);
/*
 * Export cache statistics: report the current number of cached pages
 * in the caller-supplied stats structure.
 */
void get_stats_info(struct vm_stats_info *vsi)
{
	vsi->vsi_cached = cached_pages;
}