tools/llvm: Do not build with symbols
[minix3.git] / minix / servers / vm / cache.c
blob6935357d0924a91de6aee0495e7d2c4f02df11fd
/* File that implements the data structure, insert, lookup and remove
 * functions for file system cache blocks.
 *
 * Cache blocks can be mapped into the memory of processes by the
 * 'cache' and 'file' memory types.
 */
9 #include <assert.h>
10 #include <string.h>
12 #include <minix/hash.h>
14 #include "proto.h"
15 #include "vm.h"
16 #include "region.h"
17 #include "glo.h"
18 #include "cache.h"
20 /* cache datastructure */
21 #define HASHSIZE 65536
23 static struct cached_page *cache_hash_bydev[HASHSIZE];
24 static struct cached_page *cache_hash_byino[HASHSIZE];
25 static struct cached_page *lru_oldest = NULL, *lru_newest = NULL;
27 static u32_t cached_pages = 0;
29 static void lru_rm(struct cached_page *hb)
31 struct cached_page *newer = hb->newer, *older = hb->older;
32 assert(lru_newest);
33 assert(lru_oldest);
34 if(newer) {
35 assert(newer->older == hb);
36 newer->older = older;
38 if(older) {
39 assert(older->newer == hb);
40 older->newer = newer;
43 if(lru_newest == hb) { assert(!newer); lru_newest = older; }
44 if(lru_oldest == hb) { assert(!older); lru_oldest = newer; }
46 if(lru_newest) assert(lru_newest->newer == NULL);
47 if(lru_oldest) assert(lru_oldest->older == NULL);
49 cached_pages--;
52 static void lru_add(struct cached_page *hb)
54 if(lru_newest) {
55 assert(lru_oldest);
56 assert(!lru_newest->newer);
57 lru_newest->newer = hb;
58 } else {
59 assert(!lru_oldest);
60 lru_oldest = hb;
63 hb->older = lru_newest;
64 hb->newer = NULL;
65 lru_newest = hb;
67 cached_pages++;
/*
 * Mark 'hb' as most recently used by moving it to the newest end of
 * the LRU list.
 */
void cache_lru_touch(struct cached_page *hb)
{
	lru_rm(hb);
	lru_add(hb);
}
76 static __inline u32_t makehash(u32_t p1, u64_t p2)
78 u32_t offlo = ex64lo(p2), offhi = ex64hi(p2),
79 v = 0x12345678;
80 hash_mix(p1, offlo, offhi);
81 hash_final(offlo, offhi, v);
83 return v % HASHSIZE;
#if CACHE_SANITY
/*
 * Verify internal consistency of the cache: every by-dev hash entry is
 * in the right bucket and findable; every by-ino entry has an inode;
 * the number of inode-hashed entries matches the number of entries
 * with an inode; the LRU list is properly doubly linked and its length
 * matches both the by-dev entry count and 'cached_pages'.
 */
void cache_sanitycheck_internal(void)
{
	int h;
	int n = 0;
	int byino = 0;
	int withino = 0;
	int bydev_total = 0, lru_total = 0;
	struct cached_page *cp;

	for(h = 0; h < HASHSIZE; h++) {
		for(cp = cache_hash_bydev[h]; cp; cp = cp->hash_next_dev) {
			assert(cp->dev != NO_DEV);
			assert(h == makehash(cp->dev, cp->dev_offset));
			/* BUG FIX: find_cached_page_bydev() takes a fifth
			 * 'touchlru' argument; the original call omitted it.
			 * Pass 0 so the sanity check does not reorder the
			 * LRU list as a side effect.
			 */
			assert(cp == find_cached_page_bydev(cp->dev,
				cp->dev_offset, cp->ino, cp->ino_offset, 0));
			if(cp->ino != VMC_NO_INODE) withino++;
			bydev_total++;
			n++;
			assert(n < 1500000);	/* guard against hash cycles */
		}

		for(cp = cache_hash_byino[h]; cp; cp = cp->hash_next_ino) {
			assert(cp->dev != NO_DEV);
			assert(cp->ino != VMC_NO_INODE);
			assert(h == makehash(cp->ino, cp->ino_offset));
			byino++;
			n++;
			assert(n < 1500000);
		}
	}

	assert(byino == withino);

	if(lru_newest) {
		assert(lru_oldest);
		assert(!lru_newest->newer);
		assert(!lru_oldest->older);
	} else {
		assert(!lru_oldest);
	}

	/* Walk the LRU list oldest-to-newest, checking both link
	 * directions at every node.
	 */
	for(cp = lru_oldest; cp; cp = cp->newer) {
		struct cached_page *newer = cp->newer, *older = cp->older;
		if(newer) assert(newer->older == cp);
		if(older) assert(older->newer == cp);
		lru_total++;
	}

	assert(lru_total == bydev_total);
	assert(lru_total == cached_pages);
}
#endif
140 #define rmhash_f(fname, nextfield) \
141 static void \
142 fname(struct cached_page *cp, struct cached_page **head) \
144 struct cached_page *hb; \
145 if(*head == cp) { *head = cp->nextfield; return; } \
146 for(hb = *head; hb && cp != hb->nextfield; hb = hb->nextfield) ; \
147 assert(hb); assert(hb->nextfield == cp); \
148 hb->nextfield = cp->nextfield; \
149 return; \
152 rmhash_f(rmhash_byino, hash_next_ino)
153 rmhash_f(rmhash_bydev, hash_next_dev)
155 static void addcache_byino(struct cached_page *hb)
157 int hv_ino = makehash(hb->ino, hb->ino_offset);
158 assert(hb->ino != VMC_NO_INODE);
159 hb->hash_next_ino = cache_hash_byino[hv_ino];
160 cache_hash_byino[hv_ino] = hb;
163 static void
164 update_inohash(struct cached_page *hb, ino_t ino, u64_t ino_off)
166 assert(ino != VMC_NO_INODE);
167 if(hb->ino != VMC_NO_INODE) {
168 int h = makehash(hb->ino, hb->ino_offset);
169 rmhash_byino(hb, &cache_hash_byino[h]);
171 hb->ino = ino;
172 hb->ino_offset = ino_off;
173 addcache_byino(hb);
176 struct cached_page *
177 find_cached_page_bydev(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off, int touchlru)
179 struct cached_page *hb;
181 for(hb = cache_hash_bydev[makehash(dev, dev_off)]; hb; hb=hb->hash_next_dev) {
182 if(hb->dev == dev && hb->dev_offset == dev_off) {
183 if(ino != VMC_NO_INODE) {
184 if(hb->ino != ino || hb->ino_offset != ino_off) {
185 update_inohash(hb, ino, ino_off);
189 if(touchlru) cache_lru_touch(hb);
191 return hb;
195 return NULL;
198 struct cached_page *find_cached_page_byino(dev_t dev, ino_t ino, u64_t ino_off, int touchlru)
200 struct cached_page *hb;
202 assert(ino != VMC_NO_INODE);
203 assert(dev != NO_DEV);
205 for(hb = cache_hash_byino[makehash(ino, ino_off)]; hb; hb=hb->hash_next_ino) {
206 if(hb->dev == dev && hb->ino == ino && hb->ino_offset == ino_off) {
207 if(touchlru) cache_lru_touch(hb);
209 return hb;
213 return NULL;
216 int addcache(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off, struct phys_block *pb)
218 int hv_dev;
219 struct cached_page *hb;
221 if(pb->flags & PBF_INCACHE) {
222 printf("VM: already in cache\n");
223 return EINVAL;
226 if(!SLABALLOC(hb)) {
227 printf("VM: no memory for cache node\n");
228 return ENOMEM;
231 assert(dev != NO_DEV);
232 #if CACHE_SANITY
233 assert(!find_cached_page_bydev(dev, dev_off, ino, ino_off));
234 #endif
236 hb->dev = dev;
237 hb->dev_offset = dev_off;
238 hb->ino = ino;
239 hb->ino_offset = ino_off;
240 hb->page = pb;
241 hb->page->refcount++; /* block also referenced by cache now */
242 hb->page->flags |= PBF_INCACHE;
244 hv_dev = makehash(dev, dev_off);
246 hb->hash_next_dev = cache_hash_bydev[hv_dev];
247 cache_hash_bydev[hv_dev] = hb;
249 if(hb->ino != VMC_NO_INODE)
250 addcache_byino(hb);
252 lru_add(hb);
254 return OK;
257 void rmcache(struct cached_page *cp)
259 struct phys_block *pb = cp->page;
260 int hv_dev = makehash(cp->dev, cp->dev_offset);
262 assert(cp->page->flags & PBF_INCACHE);
264 cp->page->flags &= ~PBF_INCACHE;
266 rmhash_bydev(cp, &cache_hash_bydev[hv_dev]);
267 if(cp->ino != VMC_NO_INODE) {
268 int hv_ino = makehash(cp->ino, cp->ino_offset);
269 rmhash_byino(cp, &cache_hash_byino[hv_ino]);
272 assert(cp->page->refcount >= 1);
273 cp->page->refcount--;
275 lru_rm(cp);
277 if(pb->refcount == 0) {
278 assert(pb->phys != MAP_NONE);
279 free_mem(ABS2CLICK(pb->phys), 1);
280 SLABFREE(pb);
283 SLABFREE(cp);
286 int cache_freepages(int pages)
288 struct cached_page *cp, *newercp;
289 int freed = 0;
290 int oldsteps = 0;
291 int skips = 0;
293 for(cp = lru_oldest; cp && freed < pages; cp = newercp) {
294 newercp = cp->newer;
295 assert(cp->page->refcount >= 1);
296 if(cp->page->refcount == 1) {
297 rmcache(cp);
298 freed++;
299 skips = 0;
300 } else skips++;
301 oldsteps++;
304 return freed;
308 * Remove all pages that are associated with the given device.
310 void
311 clear_cache_bydev(dev_t dev)
313 struct cached_page *cp, *ncp;
314 int h;
316 for (h = 0; h < HASHSIZE; h++) {
317 for (cp = cache_hash_bydev[h]; cp != NULL; cp = ncp) {
318 ncp = cp->hash_next_dev;
320 if (cp->dev == dev)
321 rmcache(cp);
326 void get_stats_info(struct vm_stats_info *vsi)
328 vsi->vsi_cached = cached_pages;