/*
 * Disk buffer cache
 *
 * Copyright © 2007 Robert Norris
 * Copyright © 2008 Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the same terms as AROS itself.
 *
 * $Id$
 */
/*
 * This is an attempt at a generic buffer cache for filesystems. It's a
 * traditional cache of the type described in Tanenbaum 2nd Ed. Eventually I
 * hope it will be a system-wide facility (eg cache.resource), but for the
 * moment it's just part of fat.handler.
 *
 * The idea is that we have a pile of blocks (struct cache_block), a hashtable
 * and a free list. Initially all the blocks are free, and reside on the free
 * list (a doubly-linked list). The hashtable is empty.
 *
 * When a request for a block comes in (cache_get_block()), we extract the
 * bottom N bits of the block number, and then scan the linked list in that
 * cache bucket for the block. If it's there, the use count for the block is
 * incremented and the block is returned.
 *
 * If it's not found, then we check the free list. The free list is stored in
 * order of use time, with the least-recently-used block at the front (head)
 * and the most-recently-used block at the back (tail). We scan the free list
 * from back to front. A block that was used recently is likely to be used
 * again in the future, so this is the fastest way to find it. Once found, its
 * use count is incremented and it's re-inserted into the appropriate hash
 * bucket/list, then returned to the caller.
 *
 * If the block isn't found in the free list, then we don't have it anywhere,
 * and have to load it from disk. We take an empty buffer from the beginning
 * of the free list (that is, the oldest unused block available). If it's
 * dirty (ie it was written to), we write it out along with all the other
 * dirty blocks for the device/unit (to improve the odds of the filesystem
 * remaining consistent). Then we load the block from the disk into the
 * buffer, fiddle with the block metadata, add it to the hash and return it.
 *
 * When the caller is finished with the block, it calls cache_put_block() to
 * return the block to the cache. The block's use count is decremented. If
 * it's still >0 (ie the block is still in use), the function just returns.
 * If not, it removes the block from the hash and adds it to the end of the
 * free list.
 *
 * A task periodically scans the free list for dirty buffers, and writes them
 * out to disk.
 */
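/*
 * An illustrative sketch of the intended call sequence, as seen from a
 * filesystem handler. The sizes and block number below are made up for the
 * example (see cache.h for the real prototypes); note that hash_size here
 * is a power of two, which the hashing below relies on:
 *
 *     struct cache *c = cache_new(64, 128, 512, CACHE_WRITETHROUGH);
 *     struct cache_block *b;
 *
 *     if (cache_get_block(c, 42, 0, &b) == 0) {
 *         (read or modify the block_size bytes at b->data)
 *         cache_mark_block_dirty(c, b);    (only if it was modified)
 *         cache_put_block(c, b, 0);        (block moves back to the free list)
 *     }
 *
 *     cache_flush(c);                      (write out anything still dirty)
 *     cache_free(c);
 */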
#include <exec/types.h>
#include <exec/nodes.h>
#include <exec/lists.h>
#include <exec/io.h>
#include <exec/ports.h>
#include <exec/errors.h>
#include <dos/dos.h>

#include <proto/exec.h>

#include "cache.h"
#include "fat_fs.h"
#include "fat_protos.h"
#include "timer.h"

#define DEBUG 0
#include "debug.h"
struct cache *cache_new(ULONG hash_size, ULONG num_blocks, ULONG block_size, ULONG flags) {
    struct cache *c;
    ULONG i;

    D(bug("cache_new: hash_size %ld num_blocks %ld block_size %ld flags 0x%08x\n", hash_size, num_blocks, block_size, flags));

    if ((c = (struct cache *) AllocVec(sizeof(struct cache), MEMF_PUBLIC)) == NULL)
        return NULL;

    c->hash_size = hash_size;
    c->hash_mask = hash_size-1;
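    /* note: this mask trick assumes hash_size is a power of two; with any
     * other size, some hash buckets could never be selected */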

    c->block_size = block_size;

    c->flags = CACHE_WRITETHROUGH;
    if ((flags & (CACHE_WRITEBACK | CACHE_WRITETHROUGH)) == CACHE_WRITEBACK)
        c->flags = CACHE_WRITEBACK;
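    /* ie write-through is the default; write-back only takes effect when
     * requested on its own, without CACHE_WRITETHROUGH also set */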

    c->num_blocks = num_blocks;
    c->num_in_use = 0;

    c->blocks = (struct cache_block **) AllocVec(sizeof(struct cache_block *) * num_blocks, MEMF_PUBLIC);

    c->hits = c->misses = 0;

    for (i = 0; i < num_blocks; i++) {
        struct cache_block *b;

        b = (struct cache_block *) AllocVec(sizeof(struct cache_block) + block_size, MEMF_PUBLIC);
        b->use_count = 0;
        b->is_dirty = FALSE;
        b->num = 0;
        b->data = (UBYTE *) b + sizeof(struct cache_block);

        b->hash_next = b->hash_prev = NULL;

        c->blocks[i] = b;
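        /* chain the blocks together into the initial free list as we go */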
        b->free_next = NULL;
        if (i == 0)
            b->free_prev = NULL;
        else {
            b->free_prev = c->blocks[i-1];
            c->blocks[i-1]->free_next = b;
        }
    }

    c->free_head = c->blocks[0];
    c->free_tail = c->blocks[num_blocks-1];

    c->hash = (struct cache_block **) AllocVec(sizeof(struct cache_block *) * hash_size, MEMF_PUBLIC | MEMF_CLEAR);

    c->dirty = NULL;

    return c;
}
void cache_free(struct cache *c) {
    ULONG i;

    cache_flush(c);

    for (i = 0; i < c->num_blocks; i++)
        FreeVec(c->blocks[i]);
    FreeVec(c->blocks);
    FreeVec(c->hash);
    FreeVec(c);
}
ULONG cache_get_block(struct cache *c, ULONG num, ULONG flags, struct cache_block **rb) {
    struct cache_block *b;
    ULONG err;

    D(bug("cache_get_block: looking for block %d, flags 0x%08x\n", num, flags));

    /* first check the in-use blocks */
    for (b = c->hash[num & c->hash_mask]; b != NULL; b = b->hash_next)
        if (b->num == num) {
            b->use_count++;

            D(bug("cache_get_block: found in-use block, use count is now %d\n", b->use_count));

            c->hits++;

            *rb = b;
            return 0;
        }

    D(bug("cache_get_block: in-use block not found, checking free list\n"));

    /* not there, check the recently-freed list. recently used stuff is placed
     * at the back, so we start there */
    for (b = c->free_tail; b != NULL; b = b->free_prev) {
        if (b->num == num) {
            D(bug("cache_get_block: found it in the free list\n"));

            c->hits++;

            /* remove it from the free list */
            if (b->free_prev != NULL)
                b->free_prev->free_next = b->free_next;
            else
                c->free_head = b->free_next;

            if (b->free_next != NULL)
                b->free_next->free_prev = b->free_prev;
            else
                c->free_tail = b->free_prev;

            b->free_prev = b->free_next = NULL;

            /* place it at the front of the hash */
            b->hash_prev = NULL;
            b->hash_next = c->hash[num & c->hash_mask];
            if (b->hash_next != NULL)
                b->hash_next->hash_prev = b;
            c->hash[num & c->hash_mask] = b;

            /* if it's dirty, flush everything */
            if (b->is_dirty)
                cache_flush(c);

            b->use_count = 1;
            c->num_in_use++;

            D(bug("cache_get_block: total %d blocks in use\n", c->num_in_use));

            *rb = b;
            return 0;
        }
    }
201 D(bug("cache_get_block: not found, loading from disk\n"));
202 c->misses++;
204 /* gotta read it from disk. get an empty buffer */
205 b = c->free_head;
206 if (b == NULL) {
207 D(bug("cache_get_block: no free blocks left\n"));
208 return ERROR_NO_FREE_STORE;
211 /* detach it */
212 if (c->free_head == c->free_tail)
213 c->free_head = c->free_tail = NULL;
214 else {
215 c->free_head = b->free_next;
216 if (c->free_head != NULL)
217 c->free_head->free_prev = NULL;
220 b->free_prev = b->free_next = NULL;
222 /* write it out if it's dirty */
223 if (b->is_dirty)
224 cache_flush(c);

    /* read the block from disk */
    if ((err = AccessDisk(FALSE, num, 1, c->block_size, b->data)) != 0) {
        /* read failed, put the block back on the front of the free list,
         * keeping the prev pointers and the tail consistent */
        b->free_next = c->free_head;
        if (b->free_next != NULL)
            b->free_next->free_prev = b;
        else
            c->free_tail = b;
        c->free_head = b;

        /* and bail */
        *rb = NULL;
        return err;
    }

    /* setup the rest of it */
    b->num = num;
    b->is_dirty = FALSE;

    /* add it to the hash */
    b->hash_next = c->hash[num & c->hash_mask];
    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b;
    c->hash[num & c->hash_mask] = b;

    b->use_count = 1;
    c->num_in_use++;

    D(bug("cache_get_block: loaded from disk, total %d blocks in use\n", c->num_in_use));

    *rb = b;
    return 0;
}
ULONG cache_put_block(struct cache *c, struct cache_block *b, ULONG flags) {
    D(bug("cache_put_block: returning block %d, flags 0x%08x\n", b->num, flags));

    /* if it's still in use, then we've got it easy */
    b->use_count--;
    if (b->use_count != 0) {
        D(bug("cache_put_block: new use count is %d, nothing else to do\n", b->use_count));
        return 0;
    }

    /* no longer in use, remove it from its hash */
    if (b->hash_prev != NULL)
        b->hash_prev->hash_next = b->hash_next;
    else
        c->hash[b->num & c->hash_mask] = b->hash_next;

    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b->hash_prev;

    b->hash_prev = b->hash_next = NULL;

    /* put it at the end of the free list */
    b->free_next = NULL;
    b->free_prev = c->free_tail;
    if (b->free_prev != NULL)
        b->free_prev->free_next = b;
    c->free_tail = b;
    if (c->free_head == NULL)
        c->free_head = b;

    /* one down */
    c->num_in_use--;

    D(bug("cache_put_block: no longer in use, moved to free list, total %ld blocks in use\n", c->num_in_use));

    return 0;
}
ULONG cache_get_blocks(struct cache *c, ULONG num, ULONG nblocks, ULONG flags, struct cache_block **rb) {
    ULONG err, i;

    D(bug("cache_get_blocks: loading %d blocks starting at %d, flags 0x%08x\n", nblocks, num, flags));

    /* XXX optimise this to get contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        if ((err = cache_get_block(c, num+i, flags, &rb[i])) != 0) {
            D(bug("cache_get_blocks: block load failed, freeing everything we got so far\n"));

            /* note: i is unsigned, so count down with a while loop rather
             * than "for (; i >= 0; i--)", which would never terminate. the
             * block at index i was never obtained, so it isn't put back */
            while (i-- > 0)
                cache_put_block(c, rb[i], 0);

            return err;
        }
    }

    return 0;
}
ULONG cache_put_blocks(struct cache *c, struct cache_block **b, ULONG nblocks, ULONG flags) {
    ULONG i;

    D(bug("cache_put_blocks: returning %d blocks starting at %d, flags 0x%08x\n", nblocks, b[0]->num, flags));

    for (i = 0; i < nblocks; i++)
        cache_put_block(c, b[i], 0);

    return 0;
}
ULONG cache_mark_block_dirty(struct cache *c, struct cache_block *b) {
    ULONG err;

    if (c->flags & CACHE_WRITEBACK) {
        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n", b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n", b->num));

        return 0;
    }
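    /* write-through mode: write the block out immediately. if the write
     * fails, the block is queued on the dirty list so a later cache_flush()
     * can retry it */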
345 D(bug("cache_mark_block_dirty: writing dirty block %d\n", b->num));
347 if ((err = AccessDisk(TRUE, b->num, 1, c->block_size, b->data)) != 0) {
348 D(bug("cache_mark_block_dirty: write failed, leaving block marked dirty\n"));
350 if (!b->is_dirty) {
351 D(bug("cache_mark_block_dirty: adding block %d to dirty list\n", b->num));
353 b->dirty_next = c->dirty;
354 c->dirty = b;
356 b->is_dirty = TRUE;
359 else
360 D(bug("cache_mark_block_dirty: block %d already dirty\n", b->num));
362 return err;
365 return 0;
ULONG cache_mark_blocks_dirty(struct cache *c, struct cache_block **b, ULONG nblocks) {
    ULONG err, i;

    if (c->flags & CACHE_WRITEBACK) {
        for (i = 0; i < nblocks; i++) {
            if (!b[i]->is_dirty) {
                D(bug("cache_mark_blocks_dirty: adding block %d to dirty list\n", b[i]->num));

                b[i]->dirty_next = c->dirty;
                c->dirty = b[i];

                b[i]->is_dirty = TRUE;
            }
            else
                D(bug("cache_mark_blocks_dirty: block %d already dirty\n", b[i]->num));
        }

        return 0;
    }

    /* XXX optimise this to do contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        D(bug("cache_mark_blocks_dirty: writing dirty block %d\n", b[i]->num));

        if ((err = AccessDisk(TRUE, b[i]->num, 1, c->block_size, b[i]->data)) != 0) {
            D(bug("cache_mark_blocks_dirty: write failed, leaving this and remaining blocks marked dirty\n"));

            for (; i < nblocks; i++) {
                if (!b[i]->is_dirty) {
                    D(bug("cache_mark_blocks_dirty: adding block %d to dirty list\n", b[i]->num));

                    b[i]->dirty_next = c->dirty;
                    c->dirty = b[i];

                    b[i]->is_dirty = TRUE;
                }
                else
                    D(bug("cache_mark_blocks_dirty: block %d already dirty\n", b[i]->num));
            }

            return err;
        }
    }

    return 0;
}
ULONG cache_flush(struct cache *c) {
    struct cache_block *b, *next;
    ULONG err = 0;
    int count = 0;

    D(bug("cache_flush: flushing dirty blocks\n"));

    b = c->dirty;
    while (b != NULL) {
        next = b->dirty_next;

        if ((err = AccessDisk(TRUE, b->num, 1, c->block_size, b->data)) != 0) {
            D(bug("cache_flush: write failed, leaving this and remaining blocks marked dirty\n"));
            break;
        }

        b->is_dirty = FALSE;    /* written successfully, so it's clean now */
        b->dirty_next = NULL;

        count++;
        b = next;
    }

    c->dirty = b;
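    /* b now points at the first block that failed to write (or is NULL if
     * all writes succeeded), so the dirty list keeps exactly the blocks that
     * still need to go out */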
440 D(bug("cache_flush: %d blocks flushed\n", count));
442 return err;
#if DEBUG_CACHESTATS > 0
void cache_stats(struct cache *c) {
    struct cache_block *b;
    ULONG count, i;

    kprintf("[cache] statistics for cache 0x%08x\n", c);

    kprintf("    %ld hash buckets (mask 0x%x)\n", c->hash_size, c->hash_mask);
    kprintf("    block size: %ld bytes\n", c->block_size);
    kprintf("    total blocks: %ld\n", c->num_blocks);
    kprintf("    flags:%s%s\n", c->flags & CACHE_WRITETHROUGH ? " CACHE_WRITETHROUGH" : "",
                                c->flags & CACHE_WRITEBACK ? " CACHE_WRITEBACK" : "");
    kprintf("    total blocks in use: %ld\n", c->num_in_use);

    count = 0;
    for (i = 0; i < c->hash_size; i++)
        for (b = c->hash[i]; b != NULL; b = b->hash_next)
            count++;

    if (count != c->num_in_use)
        kprintf("WARNING: in-use count (%ld) doesn't match hash contents (%ld)\n", c->num_in_use, count);

    count = 0;
    for (b = c->free_head; b != NULL; b = b->free_next)
        count++;

    kprintf("    blocks on free list: %ld\n", count);

    count = 0;
    for (b = c->dirty; b != NULL; b = b->dirty_next)
        count++;

    kprintf("    blocks on dirty list: %ld\n", count);

    kprintf("    hits: %ld    misses: %ld\n", c->hits, c->misses);
}
#endif