/*
 * Copyright © 2007 Robert Norris
 * Copyright © 2008 Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the same terms as AROS itself.
 *
 * This is an attempt at a generic buffer cache for filesystems. It's a
 * traditional cache of the type described in Tanenbaum 2nd Ed. Eventually I
 * hope it will be a system-wide facility (eg cache.resource), but for the
 * moment it's just part of fat.handler.
 *
 * The idea is that we have a pile of blocks (struct cache_block), a hashtable
 * and a free list. Initially all the blocks are free, and reside on the free
 * list (a doubly-linked list). The hashtable is empty.
 *
 * When a request for a block comes in (cache_get_block()), we extract the
 * bottom N bits of the block number, and then scan the linked list in that
 * cache bucket for the block. If it's there, the use count for the block is
 * incremented and the block is returned.
 *
 * If it's not found, we check the free list. The free list is stored in order
 * of use time, with the least-recently-used block at the front (head) and the
 * most-recently-used block at the back (tail). We scan the free list from
 * back to front: a recently-used block is the most likely to be requested
 * again, and it sits near the tail, so starting at the back finds it fastest.
 * Once found, its use count is incremented and it's re-inserted into the
 * appropriate hash bucket/list, then returned to the caller.
 *
 * If the block isn't in the free list either, then we don't have it anywhere,
 * and have to load it from disk. We take an empty buffer from the beginning
 * of the free list (that is, the oldest unused block available). If it's dirty
 * (ie was written to), we write it out, along with all the other dirty blocks
 * for the device/unit (to improve the odds of the filesystem remaining
 * consistent). Then we load the block from the disk into the buffer, fiddle
 * with the block metadata, add it to the hash and return it.
 *
 * When the caller is finished with the block, it calls cache_put_block() to
 * return the block to the cache. The block's use count is decremented. If it's
 * still >0 (ie the block is in use), then the function just returns. If not,
 * it removes the block from the hash and adds it to the end of the free list.
 *
 * A task periodically scans the free list for dirty buffers, and writes them
 * out to disk.
 */
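/*
 * A rough sketch of how a filesystem might drive this cache (hypothetical
 * caller; the block number and sizes here are made up, and we assume the
 * functions below return 0 on success):
 *
 *     struct cache *c = cache_new(64, 128, 512, CACHE_WRITETHROUGH);
 *     struct cache_block *b;
 *
 *     if (cache_get_block(c, 0, 0, &b) == 0) {
 *         ... read or modify b->data ...
 *         cache_mark_block_dirty(c, b);
 *         cache_put_block(c, b, 0);
 *     }
 *
 *     cache_flush(c);
 *     cache_free(c);
 */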
#include <exec/types.h>
#include <exec/nodes.h>
#include <exec/lists.h>
#include <exec/ports.h>
#include <exec/errors.h>

#include <proto/exec.h>

#include "fat_protos.h"

struct cache *cache_new(ULONG hash_size, ULONG num_blocks, ULONG block_size, ULONG flags) {
    struct cache *c;
    ULONG i;

    D(bug("cache_new: hash_size %ld num_blocks %ld block_size %ld flags 0x%08x\n",
          hash_size, num_blocks, block_size, flags));

    if ((c = (struct cache *) AllocVec(sizeof(struct cache), MEMF_PUBLIC)) == NULL)
        return NULL;

    c->hash_size = hash_size;
    c->hash_mask = hash_size - 1;
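    /* num & hash_mask is used as the bucket index below, so hash_size must be
     * a power of two for the mask to work */
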
    c->block_size = block_size;

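    /* write-through is the default; write-back takes effect only when it's
     * requested without write-through */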
    c->flags = CACHE_WRITETHROUGH;
    if ((flags & (CACHE_WRITEBACK | CACHE_WRITETHROUGH)) == CACHE_WRITEBACK)
        c->flags = CACHE_WRITEBACK;

    c->num_blocks = num_blocks;
    c->num_in_use = 0;

    c->blocks = (struct cache_block **) AllocVec(sizeof(struct cache_block *) * num_blocks, MEMF_PUBLIC);

    c->hits = c->misses = 0;

    for (i = 0; i < num_blocks; i++) {
        struct cache_block *b;

        b = (struct cache_block *) AllocVec(sizeof(struct cache_block) + block_size, MEMF_PUBLIC);

        /* the data buffer lives directly after the block header */
        b->data = (UBYTE *) b + sizeof(struct cache_block);

        b->hash_next = b->hash_prev = NULL;
        b->free_next = b->free_prev = NULL;

        c->blocks[i] = b;

        /* chain it onto the end of the free list */
        if (i > 0) {
            b->free_prev = c->blocks[i-1];
            c->blocks[i-1]->free_next = b;
        }
    }

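    /* every block starts out free: least-recently used at the head,
     * most-recently used at the tail */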
    c->free_head = c->blocks[0];
    c->free_tail = c->blocks[num_blocks-1];

    c->hash = (struct cache_block **) AllocVec(sizeof(struct cache_block *) * hash_size, MEMF_PUBLIC | MEMF_CLEAR);

    c->dirty = NULL;

    return c;
}

void cache_free(struct cache *c) {
    ULONG i;

    for (i = 0; i < c->num_blocks; i++)
        FreeVec(c->blocks[i]);

    FreeVec(c->blocks);
    FreeVec(c->hash);
    FreeVec(c);
}

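/* note: cache_free() doesn't write anything out, so any dirty data should be
 * flushed with cache_flush() before the cache is freed */
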
ULONG cache_get_block(struct cache *c, ULONG num, ULONG flags, struct cache_block **rb) {
    struct cache_block *b;
    ULONG err;

    D(bug("cache_get_block: looking for block %d, flags 0x%08x\n", num, flags));

    /* first check the in-use blocks */
    for (b = c->hash[num & c->hash_mask]; b != NULL; b = b->hash_next)
        if (b->num == num) {
            b->use_count++;
            c->hits++;

            D(bug("cache_get_block: found in-use block, use count is now %d\n", b->use_count));

            *rb = b;
            return 0;
        }

    D(bug("cache_get_block: in-use block not found, checking free list\n"));

    /* not there, check the recently-freed list. recently used stuff is placed
     * at the back, so we start there */
    for (b = c->free_tail; b != NULL; b = b->free_prev) {
        if (b->num == num) {
            D(bug("cache_get_block: found it in the free list\n"));

            /* remove it from the free list */
            if (b->free_prev != NULL)
                b->free_prev->free_next = b->free_next;
            else
                c->free_head = b->free_next;

            if (b->free_next != NULL)
                b->free_next->free_prev = b->free_prev;
            else
                c->free_tail = b->free_prev;

            b->free_prev = b->free_next = NULL;

            /* place it at the front of the hash */
            b->hash_next = c->hash[num & c->hash_mask];
            if (b->hash_next != NULL)
                b->hash_next->hash_prev = b;
            c->hash[num & c->hash_mask] = b;

            /* if it's dirty, flush everything */
            if (b->is_dirty)
                cache_flush(c);

            b->use_count = 1;
            c->num_in_use++;
            c->hits++;

            D(bug("cache_get_block: total %d blocks in use\n", c->num_in_use));

            *rb = b;
            return 0;
        }
    }

    c->misses++;

    D(bug("cache_get_block: not found, loading from disk\n"));

    /* gotta read it from disk. get an empty buffer */
    if ((b = c->free_head) == NULL) {
        D(bug("cache_get_block: no free blocks left\n"));
        return ERROR_NO_FREE_STORE;
    }

    /* detach it from the free list */
    if (c->free_head == c->free_tail)
        c->free_head = c->free_tail = NULL;
    else {
        c->free_head = b->free_next;
        if (c->free_head != NULL)
            c->free_head->free_prev = NULL;
    }

    b->free_prev = b->free_next = NULL;

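    /* b is now the least-recently used buffer, detached and ready to be
     * reloaded with the requested block */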
    /* write it out if it's dirty (along with everything else that's dirty) */
    if (b->is_dirty)
        cache_flush(c);

    /* read the block from disk */
    if ((err = AccessDisk(FALSE, num, 1, c->block_size, b->data)) != 0) {
        /* read failed, put the block back on the free list */
        b->free_next = c->free_head;
        if (b->free_next != NULL)
            b->free_next->free_prev = b;
        c->free_head = b;
        if (c->free_tail == NULL)
            c->free_tail = b;

        return err;
    }

    /* setup the rest of it */
    b->num = num;
    b->use_count = 1;
    b->is_dirty = FALSE;
    c->num_in_use++;

    /* add it to the hash */
    b->hash_next = c->hash[num & c->hash_mask];
    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b;
    c->hash[num & c->hash_mask] = b;

    D(bug("cache_get_block: loaded from disk, total %d blocks in use\n", c->num_in_use));

    *rb = b;
    return 0;
}

ULONG cache_put_block(struct cache *c, struct cache_block *b, ULONG flags) {
    D(bug("cache_put_block: returning block %d, flags 0x%08x\n", b->num, flags));

    /* if it's still in use, then we've got it easy */
    b->use_count--;
    if (b->use_count != 0) {
        D(bug("cache_put_block: new use count is %d, nothing else to do\n", b->use_count));
        return 0;
    }

    /* no longer in use, remove it from its hash */
    if (b->hash_prev != NULL)
        b->hash_prev->hash_next = b->hash_next;
    else
        c->hash[b->num & c->hash_mask] = b->hash_next;

    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b->hash_prev;

    b->hash_prev = b->hash_next = NULL;

    /* put it at the end of the free list */
    b->free_prev = c->free_tail;
    if (b->free_prev != NULL)
        b->free_prev->free_next = b;
    c->free_tail = b;
    if (c->free_head == NULL)
        c->free_head = b;

    c->num_in_use--;

    D(bug("cache_put_block: no longer in use, moved to free list, total %ld blocks in use\n", c->num_in_use));

    return 0;
}

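/* note that returning a block doesn't write it out; a dirty block stays dirty
 * on the free list until cache_flush() runs or its buffer gets reused */
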
ULONG cache_get_blocks(struct cache *c, ULONG num, ULONG nblocks, ULONG flags, struct cache_block **rb) {
    ULONG err, i;

    D(bug("cache_get_blocks: loading %d blocks starting at %d, flags 0x%08x\n", nblocks, num, flags));

    /* XXX optimise this to get contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        if ((err = cache_get_block(c, num+i, flags, &rb[i])) != 0) {
            D(bug("cache_get_blocks: block load failed, freeing everything we got so far\n"));

            /* put back everything we loaded before the failure */
            while (i--)
                cache_put_block(c, rb[i], 0);

            return err;
        }
    }

    return 0;
}

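/* on failure the caller gets nothing: every block obtained so far has already
 * been put back (though it will usually still be cached on the free list) */
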
ULONG cache_put_blocks(struct cache *c, struct cache_block **b, ULONG nblocks, ULONG flags) {
    ULONG i;

    D(bug("cache_put_blocks: returning %d blocks starting at %d, flags 0x%08x\n", nblocks, b[0]->num, flags));

    for (i = 0; i < nblocks; i++)
        cache_put_block(c, b[i], 0);

    return 0;
}

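/* marking blocks dirty: in write-back mode the block is just queued on the
 * dirty list for a later cache_flush(); in write-through mode it's written to
 * disk immediately, and only queued if that write fails */
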
ULONG cache_mark_block_dirty(struct cache *c, struct cache_block *b) {
    ULONG err;

    if (c->flags & CACHE_WRITEBACK) {
        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n", b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n", b->num));

        return 0;
    }

    D(bug("cache_mark_block_dirty: writing dirty block %d\n", b->num));

    if ((err = AccessDisk(TRUE, b->num, 1, c->block_size, b->data)) != 0) {
        D(bug("cache_mark_block_dirty: write failed, leaving block marked dirty\n"));

        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n", b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n", b->num));

        return err;
    }

    b->is_dirty = FALSE;

    return 0;
}

ULONG cache_mark_blocks_dirty(struct cache *c, struct cache_block **b, ULONG nblocks) {
    ULONG err, i;

    if (c->flags & CACHE_WRITEBACK) {
        for (i = 0; i < nblocks; i++) {
            if (!b[i]->is_dirty) {
                D(bug("cache_mark_blocks_dirty: adding block %d to dirty list\n", b[i]->num));

                b[i]->dirty_next = c->dirty;
                c->dirty = b[i];

                b[i]->is_dirty = TRUE;
            }
            else
                D(bug("cache_mark_blocks_dirty: block %d already dirty\n", b[i]->num));
        }

        return 0;
    }

    /* XXX optimise this to do contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        D(bug("cache_mark_blocks_dirty: writing dirty block %d\n", b[i]->num));

        if ((err = AccessDisk(TRUE, b[i]->num, 1, c->block_size, b[i]->data)) != 0) {
            D(bug("cache_mark_blocks_dirty: write failed, leaving this and remaining blocks marked dirty\n"));

            /* queue this and everything after it for a later flush */
            for (; i < nblocks; i++) {
                if (!b[i]->is_dirty) {
                    D(bug("cache_mark_blocks_dirty: adding block %d to dirty list\n", b[i]->num));

                    b[i]->dirty_next = c->dirty;
                    c->dirty = b[i];

                    b[i]->is_dirty = TRUE;
                }
                else
                    D(bug("cache_mark_blocks_dirty: block %d already dirty\n", b[i]->num));
            }

            return err;
        }

        b[i]->is_dirty = FALSE;
    }

    return 0;
}

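/* cache_flush() empties the dirty list. the design notes at the top have a
 * task calling this periodically; it's also called before a dirty buffer is
 * reused in cache_get_block() */
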
ULONG cache_flush(struct cache *c) {
    struct cache_block *b, *next;
    ULONG err = 0, count = 0;

    D(bug("cache_flush: flushing dirty blocks\n"));

    b = c->dirty;
    while (b != NULL) {
        next = b->dirty_next;

        if ((err = AccessDisk(TRUE, b->num, 1, c->block_size, b->data)) != 0) {
            D(bug("cache_flush: write failed, leaving this and remaining blocks marked dirty\n"));
            break;
        }

        b->dirty_next = NULL;
        b->is_dirty = FALSE;

        /* written successfully, take it off the dirty list */
        c->dirty = next;
        b = next;

        count++;
    }

    D(bug("cache_flush: %d blocks flushed\n", count));

    return err;
}

#if DEBUG_CACHESTATS > 0
void cache_stats(struct cache *c) {
    struct cache_block *b;
    ULONG count, i;

    kprintf("[cache] statistics for cache 0x%08x\n", c);

    kprintf("    %ld hash buckets (mask 0x%x)\n", c->hash_size, c->hash_mask);
    kprintf("    block size: %ld bytes\n", c->block_size);
    kprintf("    total blocks: %ld\n", c->num_blocks);
    kprintf("    flags:%s%s\n", c->flags & CACHE_WRITETHROUGH ? " CACHE_WRITETHROUGH" : "",
                                c->flags & CACHE_WRITEBACK ? " CACHE_WRITEBACK" : "");
    kprintf("    total blocks in use: %ld\n", c->num_in_use);

    count = 0;
    for (i = 0; i < c->hash_size; i++)
        for (b = c->hash[i]; b != NULL; b = b->hash_next)
            count++;

    if (count != c->num_in_use)
        kprintf("WARNING: in-use count (%ld) doesn't match hash contents (%ld)\n", c->num_in_use, count);

    count = 0;
    for (b = c->free_head; b != NULL; b = b->free_next)
        count++;

    kprintf("    blocks on free list: %ld\n", count);

    count = 0;
    for (b = c->dirty; b != NULL; b = b->dirty_next)
        count++;

    kprintf("    blocks on dirty list: %ld\n", count);

    kprintf("    hits: %ld  misses: %ld\n", c->hits, c->misses);
}
#endif