2 /* File that implements the data structure, insert, lookup and remove
3 * functions for file system cache blocks.
5 * Cache blocks can be mapped into the memory of processes by the
6 * 'cache' and 'file' memory types.
12 #include <minix/hash.h>
20 /* cache datastructure */
/* Number of buckets in each hash table; a power of two. */
21 #define HASHSIZE 65536
/* Hash table of cached pages keyed by (device, device offset); every
 * cached page is present in this table.
 */
23 static struct cached_page
*cache_hash_bydev
[HASHSIZE
];
/* Second hash table over the same pages, keyed by (inode, inode offset).
 * Only pages whose inode is known (ino != VMC_NO_INODE) appear here —
 * see addcache_byino() and the sanity check below.
 */
24 static struct cached_page
*cache_hash_byino
[HASHSIZE
];
/* Doubly linked LRU list through all cached pages: lru_oldest is the
 * eviction-candidate end, lru_newest the most recently touched end.
 */
25 static struct cached_page
*lru_oldest
= NULL
, *lru_newest
= NULL
;
/* Total number of pages currently held by the cache (checked against the
 * hash/LRU walks in cache_sanitycheck_internal()).
 */
27 static u32_t cached_pages
= 0;
/* Unlink 'hb' from the LRU list: repair the neighbour links and, when 'hb'
 * is at either end of the list, move the lru_newest/lru_oldest end pointer
 * to its neighbour.  Ends with self-checks on the end pointers.
 * NOTE(review): this view is missing lines; the two asserts on
 * newer/older below presumably run under if(newer)/if(older) guards in
 * the full source — confirm before relying on them.
 */
29 static void lru_rm(struct cached_page
*hb
)
31 struct cached_page
*newer
= hb
->newer
, *older
= hb
->older
;
/* A neighbour, if present, must point back at 'hb'. */
35 assert(newer
->older
== hb
);
39 assert(older
->newer
== hb
);
/* If 'hb' was the newest page it had no 'newer' neighbour; its 'older'
 * neighbour becomes the newest.  Symmetrically for the oldest end.
 */
43 if(lru_newest
== hb
) { assert(!newer
); lru_newest
= older
; }
44 if(lru_oldest
== hb
) { assert(!older
); lru_oldest
= newer
; }
/* Post-conditions: the list ends, when non-empty, are true ends. */
46 if(lru_newest
) assert(lru_newest
->newer
== NULL
);
47 if(lru_oldest
) assert(lru_oldest
->older
== NULL
);
/* Append 'hb' at the newest end of the LRU list.
 * NOTE(review): lines are missing from this view — the empty-list case and
 * the final updates (e.g. hb->newer = NULL; lru_newest = hb;) are not
 * visible; only the non-empty-list linking is shown.  Confirm against the
 * full source.
 */
52 static void lru_add(struct cached_page
*hb
)
/* The current newest page must be a true list end before we extend it. */
56 assert(!lru_newest
->newer
);
57 lru_newest
->newer
= hb
;
/* Link back from the new node to the previous newest. */
63 hb
->older
= lru_newest
;
/* Mark 'hb' as most recently used — presumably lru_rm() followed by
 * lru_add() so the page moves to the newest end of the LRU list; the body
 * is not visible in this view, so confirm against the full source.
 */
70 void cache_lru_touch(struct cached_page
*hb
)
/* Hash a 32-bit key (device number or inode number) together with a 64-bit
 * offset into a bucket index.  The 64-bit offset is split into its low and
 * high 32-bit halves and mixed with p1 via minix/hash.h's hash_mix()/
 * hash_final().
 * NOTE(review): the declaration of 'v' and the return statement are not
 * visible here; callers index tables of HASHSIZE with the result, so the
 * final value is presumably reduced to [0, HASHSIZE) — confirm.
 */
76 static __inline u32_t
makehash(u32_t p1
, u64_t p2
)
78 u32_t offlo
= ex64lo(p2
), offhi
= ex64hi(p2
),
80 hash_mix(p1
, offlo
, offhi
);
81 hash_final(offlo
, offhi
, v
);
/* Exhaustive consistency check of the cache: both hash tables and the LRU
 * list are walked and cross-checked, and the totals are compared against
 * cached_pages.  Assert-only; no state is modified.
 */
87 void cache_sanitycheck_internal(void)
93 int bydev_total
= 0, lru_total
= 0;
94 struct cached_page
*cp
;
/* Walk every hash bucket. */
96 for(h
= 0; h
< HASHSIZE
; h
++) {
/* Pass 1: the by-device chain.  Every entry must have a real device,
 * must hash to the bucket it sits in, and must be findable through the
 * public lookup.
 * NOTE(review): the find_cached_page_bydev() call below shows 4 args
 * while the definition takes 5 (touchlru); lines are missing from this
 * view — likely a sampling artifact, confirm against the full source.
 */
97 for(cp
= cache_hash_bydev
[h
]; cp
; cp
= cp
->hash_next_dev
) {
98 assert(cp
->dev
!= NO_DEV
);
99 assert(h
== makehash(cp
->dev
, cp
->dev_offset
));
100 assert(cp
== find_cached_page_bydev(cp
->dev
, cp
->dev_offset
, cp
->ino
, cp
->ino_offset
));
/* Count pages that have an inode association. */
101 if(cp
->ino
!= VMC_NO_INODE
) withino
++;
/* Pass 2: the by-inode chain.  Entries here must have both a real
 * device and a real inode, and must hash to this bucket by
 * (ino, ino_offset).
 */
106 for(cp
= cache_hash_byino
[h
]; cp
; cp
= cp
->hash_next_ino
) {
107 assert(cp
->dev
!= NO_DEV
);
108 assert(cp
->ino
!= VMC_NO_INODE
);
109 assert(h
== makehash(cp
->ino
, cp
->ino_offset
));
/* Every inode-associated page seen by-dev must appear by-ino, and
 * vice versa.
 */
116 assert(byino
== withino
);
/* LRU end pointers must be true ends. */
120 assert(!lru_newest
->newer
);
121 assert(!lru_oldest
->older
);
/* Walk the LRU from oldest to newest checking that forward and backward
 * links agree.
 */
126 for(cp
= lru_oldest
; cp
; cp
= cp
->newer
) {
127 struct cached_page
*newer
= cp
->newer
,
129 if(newer
) assert(newer
->older
== cp
);
130 if(older
) assert(older
->newer
== cp
);
/* Every cached page is on both the by-dev table and the LRU, and the
 * global counter agrees.
 */
134 assert(lru_total
== bydev_total
);
136 assert(lru_total
== cached_pages
);
/* Generate a static unlink function 'fname(cp, head)' that removes 'cp'
 * from the singly linked hash chain rooted at '*head', following the
 * given 'nextfield' link.  Handles the head case directly; otherwise it
 * scans for cp's predecessor and asserts it was found (cp must be on the
 * chain).  Instantiated below for the by-inode and by-device chains.
 */
140 #define rmhash_f(fname, nextfield) \
142 fname(struct cached_page *cp, struct cached_page **head) \
144 struct cached_page *hb; \
145 if(*head == cp) { *head = cp->nextfield; return; } \
146 for(hb = *head; hb && cp != hb->nextfield; hb = hb->nextfield) ; \
147 assert(hb); assert(hb->nextfield == cp); \
148 hb->nextfield = cp->nextfield; \
152 rmhash_f(rmhash_byino
, hash_next_ino
)
153 rmhash_f(rmhash_bydev
, hash_next_dev
)
/* Insert 'hb' at the head of the by-inode hash chain for its
 * (ino, ino_offset) bucket.  The page must already have a real inode
 * association.
 */
155 static void addcache_byino(struct cached_page
*hb
)
157 int hv_ino
= makehash(hb
->ino
, hb
->ino_offset
);
158 assert(hb
->ino
!= VMC_NO_INODE
);
/* Head insertion into the bucket's chain. */
159 hb
->hash_next_ino
= cache_hash_byino
[hv_ino
];
160 cache_hash_byino
[hv_ino
] = hb
;
/* Re-associate 'hb' with (ino, ino_off): if the page already had an inode
 * association, unhook it from its old by-inode bucket first, then record
 * the new association.
 * NOTE(review): lines are missing from this view — the assignment
 * hb->ino = ino and the re-insertion (presumably addcache_byino(hb)) are
 * not visible; confirm against the full source.
 */
164 update_inohash(struct cached_page
*hb
, ino_t ino
, u64_t ino_off
)
166 assert(ino
!= VMC_NO_INODE
);
/* Remove from the old by-inode bucket, if any. */
167 if(hb
->ino
!= VMC_NO_INODE
) {
168 int h
= makehash(hb
->ino
, hb
->ino_offset
);
169 rmhash_byino(hb
, &cache_hash_byino
[h
]);
172 hb
->ino_offset
= ino_off
;
/* Look up a cached page by (dev, dev_off) in the by-device hash table.
 * If the caller supplies inode information (ino != VMC_NO_INODE) and it
 * differs from what the cache has recorded, the page's inode association
 * is updated via update_inohash().  With 'touchlru' set, a hit also moves
 * the page to the newest end of the LRU.
 * NOTE(review): the return statements are not visible in this view;
 * presumably returns the matching page on a hit and NULL otherwise —
 * confirm against the full source.
 */
177 find_cached_page_bydev(dev_t dev
, u64_t dev_off
, ino_t ino
, u64_t ino_off
, int touchlru
)
179 struct cached_page
*hb
;
/* Scan the chain of the (dev, dev_off) bucket. */
181 for(hb
= cache_hash_bydev
[makehash(dev
, dev_off
)]; hb
; hb
=hb
->hash_next_dev
) {
182 if(hb
->dev
== dev
&& hb
->dev_offset
== dev_off
) {
/* Caller knows the inode: refresh the association if stale. */
183 if(ino
!= VMC_NO_INODE
) {
184 if(hb
->ino
!= ino
|| hb
->ino_offset
!= ino_off
) {
185 update_inohash(hb
, ino
, ino_off
);
189 if(touchlru
) cache_lru_touch(hb
);
/* Look up a cached page by (ino, ino_off) in the by-inode hash table;
 * the device must match as well.  With 'touchlru' set, a hit moves the
 * page to the newest end of the LRU.
 * NOTE(review): the return statements are not visible in this view;
 * presumably returns the matching page on a hit and NULL otherwise —
 * confirm against the full source.
 */
198 struct cached_page
*find_cached_page_byino(dev_t dev
, ino_t ino
, u64_t ino_off
, int touchlru
)
200 struct cached_page
*hb
;
/* Only inode-associated lookups on a real device make sense here. */
202 assert(ino
!= VMC_NO_INODE
);
203 assert(dev
!= NO_DEV
);
/* Scan the chain of the (ino, ino_off) bucket. */
205 for(hb
= cache_hash_byino
[makehash(ino
, ino_off
)]; hb
; hb
=hb
->hash_next_ino
) {
206 if(hb
->dev
== dev
&& hb
->ino
== ino
&& hb
->ino_offset
== ino_off
) {
207 if(touchlru
) cache_lru_touch(hb
);
/* Add physical block 'pb' to the cache under (dev, dev_off), optionally
 * associated with (ino, ino_off).  Takes an extra reference on the block
 * and marks it PBF_INCACHE; inserts the new node into the by-device table
 * and, when an inode is known, into the by-inode table too.
 * NOTE(review): lines are missing from this view — the cache-node
 * allocation, the error-path returns after the printf()s, the remaining
 * field initializations (hb->dev, hb->ino, hb->page, ...), the LRU
 * insertion and the success return are not visible; confirm against the
 * full source.
 */
216 int addcache(dev_t dev
, u64_t dev_off
, ino_t ino
, u64_t ino_off
, struct phys_block
*pb
)
219 struct cached_page
*hb
;
/* Refuse to double-insert a block that is already cached. */
221 if(pb
->flags
& PBF_INCACHE
) {
222 printf("VM: already in cache\n");
227 printf("VM: no memory for cache node\n");
231 assert(dev
!= NO_DEV
);
/* There must not already be a cache entry for this (dev, dev_off). */
233 assert(!find_cached_page_bydev(dev
, dev_off
, ino
, ino_off
));
237 hb
->dev_offset
= dev_off
;
239 hb
->ino_offset
= ino_off
;
241 hb
->page
->refcount
++; /* block also referenced by cache now */
242 hb
->page
->flags
|= PBF_INCACHE
;
/* Head-insert into the by-device hash chain. */
244 hv_dev
= makehash(dev
, dev_off
);
246 hb
->hash_next_dev
= cache_hash_bydev
[hv_dev
];
247 cache_hash_bydev
[hv_dev
] = hb
;
/* Inode known: also index the page by inode. */
249 if(hb
->ino
!= VMC_NO_INODE
)
/* Remove cache entry 'cp': clear its PBF_INCACHE flag, unhook it from the
 * by-device (and, if inode-associated, by-inode) hash chains, and drop the
 * cache's reference on the underlying physical block.  If that was the
 * last reference, the physical memory is returned to the free list.
 * NOTE(review): lines are missing from this view — the LRU unlink, the
 * freeing of the cache node itself and the cached_pages decrement are not
 * visible; confirm against the full source.
 */
257 void rmcache(struct cached_page
*cp
)
259 struct phys_block
*pb
= cp
->page
;
260 int hv_dev
= makehash(cp
->dev
, cp
->dev_offset
);
/* Only pages that are actually in the cache may be removed. */
262 assert(cp
->page
->flags
& PBF_INCACHE
);
264 cp
->page
->flags
&= ~PBF_INCACHE
;
/* Unhook from the by-device chain, and the by-inode chain if present. */
266 rmhash_bydev(cp
, &cache_hash_bydev
[hv_dev
]);
267 if(cp
->ino
!= VMC_NO_INODE
) {
268 int hv_ino
= makehash(cp
->ino
, cp
->ino_offset
);
269 rmhash_byino(cp
, &cache_hash_byino
[hv_ino
]);
/* Drop the reference addcache() took on behalf of the cache. */
272 assert(cp
->page
->refcount
>= 1);
273 cp
->page
->refcount
--;
/* Last reference gone: release the physical page itself. */
277 if(pb
->refcount
== 0) {
278 assert(pb
->phys
!= MAP_NONE
);
279 free_mem(ABS2CLICK(pb
->phys
), 1);
/* Try to free up to 'pages' cache pages, scanning from the oldest end of
 * the LRU.  Only pages whose block is referenced solely by the cache
 * (refcount == 1) are eligible for eviction.
 * NOTE(review): lines are missing from this view — the 'freed' counter
 * declaration, the capture of newercp, the actual eviction (presumably
 * rmcache()) and the return value are not visible; confirm against the
 * full source.
 */
286 int cache_freepages(int pages
)
288 struct cached_page
*cp
, *newercp
;
/* Walk oldest-to-newest; 'newercp' preserves the successor across a
 * potential removal of 'cp'.
 */
293 for(cp
= lru_oldest
; cp
&& freed
< pages
; cp
= newercp
) {
295 assert(cp
->page
->refcount
>= 1);
/* refcount == 1 means only the cache holds this block: evictable. */
296 if(cp
->page
->refcount
== 1) {
308 * Remove all pages that are associated with the given device.
/* Walk every by-device hash bucket, removing each entry whose device
 * matches 'dev'.  'ncp' saves the chain successor before 'cp' may be
 * removed.
 * NOTE(review): lines are missing from this view — the return type, the
 * device comparison and the removal call (presumably rmcache()) are not
 * visible; confirm against the full source.
 */
311 clear_cache_bydev(dev_t dev
)
313 struct cached_page
*cp
, *ncp
;
316 for (h
= 0; h
< HASHSIZE
; h
++) {
317 for (cp
= cache_hash_bydev
[h
]; cp
!= NULL
; cp
= ncp
) {
/* Save the successor first: 'cp' may be unhooked below. */
318 ncp
= cp
->hash_next_dev
;
/* Report cache statistics: export the current number of cached pages into
 * the caller-supplied vm_stats_info structure.
 */
326 void get_stats_info(struct vm_stats_info
*vsi
)
328 vsi
->vsi_cached
= cached_pages
;