/*
 * Copyright © 2007 Robert Norris
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the same terms as AROS itself.
 *
 * This is an attempt at a generic buffer cache for filesystems. It's a
 * traditional cache of the type described in Tanenbaum 2nd Ed. Eventually I
 * hope it will be a system-wide facility (eg cache.resource), but for the
 * moment it's just part of fat.handler.
 *
 * The idea is that we have a pile of blocks (struct cache_block), a hashtable
 * and a free list. Initially all the blocks are free, and reside on the free
 * list (a doubly-linked list). The hashtable is empty.
 *
 * When a request for a block comes in (cache_get_block()), we extract the
 * bottom N bits of the block number, and then scan the linked list in that
 * cache bucket for the block. If it's there, the use count for the block is
 * incremented and the block is returned.
 *
 * If not found, we check the free list. The free list is stored in order of
 * use time, with the least-recently-used block at the front (head) and the
 * most-recently-used block at the back (tail). We scan the free list from
 * back to front. A block that was used recently is likely to be used again
 * soon, so this is the fastest way to find it. Once found, its use count is
 * incremented and it's re-inserted into the appropriate hash bucket/list,
 * then returned to the caller.
 *
 * If the block isn't found in the free list, then we don't have it anywhere,
 * and have to load it from disk. We take an empty buffer from the beginning
 * of the free list (that is, the oldest unused block available). If it's
 * dirty (ie it was written to), we write it out along with all the other
 * dirty blocks for the device/unit (to improve the odds of the filesystem
 * remaining consistent). Then we load the block from disk into the buffer,
 * fiddle with the block metadata, add it to the hash and return it.
 *
 * When the caller is finished with the block, it calls cache_put_block() to
 * return the block to the cache. The block's use count is decremented. If
 * it's still >0 (ie the block is in use), the function just returns. If not,
 * it removes the block from the hash and adds it to the end of the free list.
 *
 * A task periodically scans the free list for dirty buffers, and writes them
 * out to disk.
 */
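/*
 * For reference, a minimal sketch of the structures this file assumes; the
 * real definitions live elsewhere (cache.h) and may carry extra fields:
 *
 *     struct cache_block {
 *         struct cache_block *hash_next, *hash_prev;  hash bucket chain
 *         struct cache_block *free_next, *free_prev;  LRU free list links
 *         struct cache_block *dirty_next;             dirty list (writeback)
 *         ULONG              num;                     block number on disk
 *         ULONG              use_count;               references held
 *         BOOL               is_dirty;                modified since load
 *         UBYTE              *data;                   the block data itself
 *     };
 *
 *     struct cache {
 *         struct IOStdReq    *req;                    device I/O request
 *         ULONG              hash_size, hash_mask;    bucket count, size-1
 *         ULONG              block_size, num_blocks, num_in_use;
 *         ULONG              flags, hits, misses;
 *         struct cache_block **blocks;                all allocated blocks
 *         struct cache_block **hash;                  bucket heads
 *         struct cache_block *free_head, *free_tail;  LRU list ends
 *         struct cache_block *dirty;                  dirty list head
 *     };
 */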
#include <exec/types.h>
#include <exec/nodes.h>
#include <exec/lists.h>

#include <exec/ports.h>
#include <exec/errors.h>

#include <devices/trackdisk.h>
#include <devices/newstyle.h>

#include <proto/exec.h>

#include <aros/debug.h>
/* 64-bit trackdisk (TD64) command numbers, for build environments where
 * devices/trackdisk.h doesn't define them */
#ifndef TD_READ64
#define TD_READ64  24
#endif
#ifndef TD_WRITE64
#define TD_WRITE64 25
#endif
/* prototypes for the lowlevel block functions */
static ULONG _cache_do_blocks_ll(struct cache *c, BOOL do_write, ULONG num,
                                 ULONG nblocks, ULONG block_size, UBYTE *data);
static void _cache_64bit_support(struct cache *c);
struct cache *cache_new(struct IOStdReq *req, ULONG hash_size,
                        ULONG num_blocks, ULONG block_size, ULONG flags) {
    struct cache *c;
    ULONG i;

    D(bug("cache_new: hash_size %ld num_blocks %ld block_size %ld "
          "flags 0x%08x\n", hash_size, num_blocks, block_size, flags));
    if ((c = (struct cache *) AllocVec(sizeof(struct cache),
        MEMF_PUBLIC)) == NULL)
        return NULL;

    c->req = req;
    c->hash_size = hash_size;
    c->hash_mask = hash_size - 1;
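    /* bucket selection is a mask of the low bits, which assumes hash_size is
     * a power of two. eg with hash_size 64 the mask is 0x3f, so block 1000
     * (0x3e8) lands in bucket 0x3e8 & 0x3f = 0x28 (40) */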
    c->block_size = block_size;
    /* writethrough is the default; writeback must be requested explicitly */
    c->flags = CACHE_WRITETHROUGH;
    if ((flags & (CACHE_WRITEBACK | CACHE_WRITETHROUGH)) == CACHE_WRITEBACK)
        c->flags = CACHE_WRITEBACK;
    _cache_64bit_support(c);
    c->num_blocks = num_blocks;
    c->blocks = (struct cache_block **) AllocVec(
        sizeof(struct cache_block *) * num_blocks, MEMF_PUBLIC);
    c->hits = c->misses = 0;
    for (i = 0; i < num_blocks; i++) {
        struct cache_block *b;

        /* the block header and its data buffer live in a single allocation */
        b = (struct cache_block *) AllocVec(
            sizeof(struct cache_block) + block_size, MEMF_PUBLIC);

        b->data = (UBYTE *) b + sizeof(struct cache_block);

        b->hash_next = b->hash_prev = NULL;
        b->free_next = b->free_prev = NULL;

        c->blocks[i] = b;

        /* chain onto the end of the free list being built */
        if (i > 0) {
            b->free_prev = c->blocks[i-1];
            c->blocks[i-1]->free_next = b;
        }
    }
    c->free_head = c->blocks[0];
    c->free_tail = c->blocks[num_blocks-1];
    c->hash = (struct cache_block **) AllocVec(
        sizeof(struct cache_block *) * hash_size, MEMF_PUBLIC | MEMF_CLEAR);

    return c;
}
void cache_free(struct cache *c) {
    ULONG i;

    for (i = 0; i < c->num_blocks; i++)
        FreeVec(c->blocks[i]);
    FreeVec(c->blocks);
    FreeVec(c->hash);
    FreeVec(c);
}
ULONG cache_get_block(struct cache *c, ULONG num, ULONG flags,
                      struct cache_block **rb) {
    struct cache_block *b;
    ULONG err;

    D(bug("cache_get_block: looking for block %d, flags 0x%08x\n", num,
          flags));
    /* first check the in-use blocks */
    for (b = c->hash[num & c->hash_mask]; b != NULL; b = b->hash_next)
        if (b->num == num) {
            b->use_count++;

            D(bug("cache_get_block: found in-use block, use count is now "
                  "%d\n", b->use_count));

            *rb = b;
            return 0;
        }
    D(bug("cache_get_block: in-use block not found, checking free list\n"));

    /* not there, check the recently-freed list. recently-used blocks are
     * placed at the back, so we start there */
    for (b = c->free_tail; b != NULL; b = b->free_prev) {
        if (b->num == num) {
            D(bug("cache_get_block: found it in the free list\n"));
            /* remove it from the free list */
            if (b->free_prev != NULL)
                b->free_prev->free_next = b->free_next;
            else
                c->free_head = b->free_next;

            if (b->free_next != NULL)
                b->free_next->free_prev = b->free_prev;
            else
                c->free_tail = b->free_prev;

            b->free_prev = b->free_next = NULL;
            /* place it at the front of the hash */
            b->hash_next = c->hash[num & c->hash_mask];
            if (b->hash_next != NULL)
                b->hash_next->hash_prev = b;
            c->hash[num & c->hash_mask] = b;

            /* if it's dirty, flush everything */
            if (b->is_dirty)
                cache_flush(c);

            b->use_count = 1;
            c->num_in_use++;

            *rb = b;

            D(bug("cache_get_block: total %d blocks in use\n",
                  c->num_in_use));

            return 0;
        }
    }
    D(bug("cache_get_block: not found, loading from disk\n"));

    /* gotta read it from disk. get an empty buffer */
    if (c->free_head == NULL) {
        D(bug("cache_get_block: no free blocks left\n"));
        return ERROR_NO_FREE_STORE;
    }

    b = c->free_head;
    /* detach it from the head of the free list */
    if (c->free_head == c->free_tail)
        c->free_head = c->free_tail = NULL;
    else {
        c->free_head = b->free_next;
        if (c->free_head != NULL)
            c->free_head->free_prev = NULL;
    }

    b->free_prev = b->free_next = NULL;
    /* write it out if it's dirty */
    if (b->is_dirty)
        cache_flush(c);

    /* read the block from disk */
    if ((err = _cache_do_blocks_ll(c, FALSE, num, 1, c->block_size,
        b->data)) != 0) {
        /* read failed, put the block back on the free list */
        b->free_next = c->free_head;
        if (b->free_next != NULL)
            b->free_next->free_prev = b;
        c->free_head = b;
        if (c->free_tail == NULL)
            c->free_tail = b;

        return err;
    }
    /* set up the rest of it */
    b->num = num;
    b->use_count = 1;
    b->is_dirty = FALSE;

    /* add it to the hash */
    b->hash_next = c->hash[num & c->hash_mask];
    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b;
    c->hash[num & c->hash_mask] = b;

    c->num_in_use++;

    *rb = b;

    D(bug("cache_get_block: loaded from disk, total %d blocks in use\n",
          c->num_in_use));

    return 0;
}
ULONG cache_put_block(struct cache *c, struct cache_block *b, ULONG flags) {
    D(bug("cache_put_block: returning block %d, flags 0x%08x\n", b->num,
          flags));

    /* if it's still in use, then we've got it easy */
    b->use_count--;
    if (b->use_count != 0) {
        D(bug("cache_put_block: new use count is %d, nothing else to do\n",
              b->use_count));
        return 0;
    }
    /* no longer in use, remove it from its hash */
    if (b->hash_prev != NULL)
        b->hash_prev->hash_next = b->hash_next;
    else
        c->hash[b->num & c->hash_mask] = b->hash_next;

    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b->hash_prev;

    b->hash_prev = b->hash_next = NULL;
    /* put it at the end of the free list */
    b->free_prev = c->free_tail;
    if (b->free_prev != NULL)
        b->free_prev->free_next = b;
    c->free_tail = b;
    if (c->free_head == NULL)
        c->free_head = b;

    c->num_in_use--;

    D(bug("cache_put_block: no longer in use, moved to free list, total %ld "
          "blocks in use\n", c->num_in_use));

    return 0;
}
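/*
 * Illustrative usage (a sketch, not part of this handler): every
 * cache_get_block() is paired with a cache_put_block() once the caller is
 * done with the buffer. The block number 0 here is arbitrary.
 *
 *     struct cache_block *b;
 *     if (cache_get_block(c, 0, 0, &b) == 0) {
 *         ... read or modify b->data ...
 *         cache_mark_block_dirty(c, b);
 *         cache_put_block(c, b, 0);
 *     }
 */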
ULONG cache_get_blocks(struct cache *c, ULONG num, ULONG nblocks, ULONG flags,
                       struct cache_block **rb) {
    ULONG err, i;

    D(bug("cache_get_blocks: loading %d blocks starting at %d, "
          "flags 0x%08x\n", nblocks, num, flags));

    /* XXX optimise this to get contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        if ((err = cache_get_block(c, num+i, flags, &rb[i])) != 0) {
            D(bug("cache_get_blocks: block load failed, freeing everything "
                  "we got so far\n"));

            while (i > 0) {
                i--;
                cache_put_block(c, rb[i], 0);
            }

            return err;
        }
    }

    return 0;
}
ULONG cache_put_blocks(struct cache *c, struct cache_block **b, ULONG nblocks,
                       ULONG flags) {
    ULONG i;

    D(bug("cache_put_blocks: returning %d blocks starting at %d, "
          "flags 0x%08x\n", nblocks, b[0]->num, flags));

    for (i = 0; i < nblocks; i++)
        cache_put_block(c, b[i], 0);

    return 0;
}
ULONG cache_mark_block_dirty(struct cache *c, struct cache_block *b) {
    ULONG err;

    /* in writeback mode, just queue the block for the flush task */
    if (c->flags & CACHE_WRITEBACK) {
        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n",
                  b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n",
                  b->num));

        return 0;
    }

    /* writethrough mode: write it to disk immediately */
    D(bug("cache_mark_block_dirty: writing dirty block %d\n", b->num));

    if ((err = _cache_do_blocks_ll(c, TRUE, b->num, 1, c->block_size,
        b->data)) != 0) {
        D(bug("cache_mark_block_dirty: write failed, leaving block marked "
              "dirty\n"));

        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n",
                  b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n",
                  b->num));

        return err;
    }

    b->is_dirty = FALSE;

    return 0;
}
ULONG cache_mark_blocks_dirty(struct cache *c, struct cache_block **b,
                              ULONG nblocks) {
    ULONG err, i;

    /* in writeback mode, just queue the blocks for the flush task */
    if (c->flags & CACHE_WRITEBACK) {
        for (i = 0; i < nblocks; i++) {
            if (!b[i]->is_dirty) {
                D(bug("cache_mark_blocks_dirty: adding block %d to dirty "
                      "list\n", b[i]->num));

                b[i]->dirty_next = c->dirty;
                c->dirty = b[i];

                b[i]->is_dirty = TRUE;
            }
            else
                D(bug("cache_mark_blocks_dirty: block %d already dirty\n",
                      b[i]->num));
        }

        return 0;
    }

    /* XXX optimise this to do contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        D(bug("cache_mark_blocks_dirty: writing dirty block %d\n",
              b[i]->num));

        if ((err = _cache_do_blocks_ll(c, TRUE, b[i]->num, 1, c->block_size,
            b[i]->data)) != 0) {
            D(bug("cache_mark_blocks_dirty: write failed, leaving this and "
                  "remaining blocks marked dirty\n"));

            for (; i < nblocks; i++) {
                if (!b[i]->is_dirty) {
                    D(bug("cache_mark_blocks_dirty: adding block %d to dirty "
                          "list\n", b[i]->num));

                    b[i]->dirty_next = c->dirty;
                    c->dirty = b[i];

                    b[i]->is_dirty = TRUE;
                }
                else
                    D(bug("cache_mark_blocks_dirty: block %d already "
                          "dirty\n", b[i]->num));
            }

            return err;
        }

        b[i]->is_dirty = FALSE;
    }

    return 0;
}
ULONG cache_flush(struct cache *c) {
    struct cache_block *b, *next;
    ULONG err = 0, count = 0;

    D(bug("cache_flush: flushing dirty blocks\n"));

    b = c->dirty;
    while (b != NULL) {
        next = b->dirty_next;

        if ((err = _cache_do_blocks_ll(c, TRUE, b->num, 1, c->block_size,
            b->data)) != 0) {
            D(bug("cache_flush: write failed, leaving this and remaining "
                  "blocks marked dirty\n"));
            break;
        }

        b->is_dirty = FALSE;
        b->dirty_next = NULL;

        count++;
        b = next;
    }

    /* anything not written (after a failure) stays on the dirty list */
    c->dirty = b;

    D(bug("cache_flush: %d blocks flushed\n", count));

    return err;
}
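/*
 * The header comment mentions a task that periodically flushes dirty
 * buffers. A minimal sketch of such a loop, assuming the task holds the
 * cache pointer c (the interval of 50 ticks, ie one second, is arbitrary):
 *
 *     for (;;) {
 *         Delay(50);
 *         cache_flush(c);
 *     }
 */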
void cache_stats(struct cache *c) {
    struct cache_block *b;
    ULONG i, count;

    kprintf("[cache] statistics for cache 0x%08x\n", c);
    kprintf("  %ld hash buckets (mask 0x%x)\n", c->hash_size, c->hash_mask);
    kprintf("  block size: %ld bytes\n", c->block_size);
    kprintf("  total blocks: %ld\n", c->num_blocks);
    kprintf("  flags:%s%s%s%s%s\n",
            c->flags & CACHE_WRITETHROUGH ? " CACHE_WRITETHROUGH" : "",
            c->flags & CACHE_WRITEBACK    ? " CACHE_WRITEBACK"    : "",
            c->flags & CACHE_64_TD64      ? " CACHE_64_TD64"      : "",
            c->flags & CACHE_64_NSD       ? " CACHE_64_NSD"       : "",
            c->flags & CACHE_64_SCSI      ? " CACHE_64_SCSI"      : "");
    kprintf("  total blocks in use: %ld\n", c->num_in_use);
    count = 0;
    for (i = 0; i < c->hash_size; i++)
        for (b = c->hash[i]; b != NULL; b = b->hash_next)
            count++;

    if (count != c->num_in_use)
        kprintf("WARNING: in-use count (%ld) doesn't match hash contents "
                "(%ld)\n", c->num_in_use, count);
    count = 0;
    for (b = c->free_head; b != NULL; b = b->free_next)
        count++;
    kprintf("  blocks on free list: %ld\n", count);
    count = 0;
    for (b = c->dirty; b != NULL; b = b->dirty_next)
        count++;
    kprintf("  blocks on dirty list: %ld\n", count);

    kprintf("  hits: %ld  misses: %ld\n", c->hits, c->misses);
}
/* lowlevel block function */
static ULONG _cache_do_blocks_ll(struct cache *c, BOOL do_write, ULONG num,
                                 ULONG nblocks, ULONG block_size,
                                 UBYTE *data) {
    ULONG err;
    UQUAD off;
    ULONG low, high, length;
    BOOL want_64 = FALSE;

    D(bug("_cache_do_blocks_ll: request to %s %ld blocks starting from %ld "
          "(block_size %ld)\n", do_write ? "write" : "read", nblocks, num,
          block_size));
    /* convert the block number (in 512-byte units) to a byte offset, and
     * split it into the two 32-bit halves the device API expects */
    off = ((UQUAD) num) * 512;

    high = off >> 32;
    low = off & 0xffffffff;

    length = nblocks * block_size;
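    /* eg block number 0x01000000 gives off = 0x200000000, so high = 0x2 and
     * low = 0x0: an offset only a 64-bit command can reach */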
    /* if the offset needs more than 32 bits we must use a 64-bit command */
    if (high > 0 || low + length < length) {
        if (!(c->flags & CACHE_64_MASK)) {
            D(bug("_cache_do_blocks_ll: 64-bit operation requested but "
                  "underlying device doesn't support it\n"));
            return IOERR_BADADDRESS;
        }

        want_64 = TRUE;
    }
524 c
->req
->io_Offset
= low
;
525 c
->req
->io_Actual
= high
;
526 c
->req
->io_Length
= nblocks
* block_size
;
527 c
->req
->io_Data
= data
;
528 c
->req
->io_Flags
= IOF_QUICK
;
    if (!want_64)
        c->req->io_Command = do_write ? CMD_WRITE : CMD_READ;
    else if (c->flags & CACHE_64_TD64)
        c->req->io_Command = do_write ? TD_WRITE64 : TD_READ64;
    else if (c->flags & CACHE_64_NSD)
        c->req->io_Command = do_write ? NSCMD_TD_WRITE64 : NSCMD_TD_READ64;
    DoIO((struct IORequest *) c->req);

    err = c->req->io_Error;

    D(bug("_cache_do_blocks_ll: returning error %ld\n", err));

    return err;
}
/* probe the device to determine 64-bit support */
static void _cache_64bit_support(struct cache *c) {
    struct NSDeviceQueryResult nsd_query;
    UWORD *nsd_cmd;

    /* try a zero-length 64-bit read; a device without TD64 support will
     * reject the command with IOERR_NOCMD */
    c->req->io_Command = TD_READ64;
    c->req->io_Offset = 0;
    c->req->io_Length = 0;
    c->req->io_Actual = 0;
    c->req->io_Data = NULL;
    c->req->io_Flags = IOF_QUICK;

    if (DoIO((struct IORequest *) c->req) == 0
        && c->req->io_Error != IOERR_NOCMD) {
        D(bug("_cache_64bit_support: device supports 64-bit trackdisk "
              "extensions\n"));
        c->flags |= CACHE_64_TD64;
    }
    /* now ask for the NSD command list and look for the 64-bit read */
    c->req->io_Command = NSCMD_DEVICEQUERY;
    c->req->io_Length = sizeof(struct NSDeviceQueryResult);
    c->req->io_Data = (APTR) &nsd_query;

    if (DoIO((struct IORequest *) c->req) == 0
        && c->req->io_Error != IOERR_NOCMD)
        for (nsd_cmd = nsd_query.SupportedCommands; *nsd_cmd != 0;
            nsd_cmd++) {
            if (*nsd_cmd == NSCMD_TD_READ64) {
                D(bug("_cache_64bit_support: device supports NSD 64-bit "
                      "trackdisk extensions\n"));
                c->flags |= CACHE_64_NSD;
                break;
            }
        }
    /* XXX probe DirectSCSI */

    if (c->flags & CACHE_64_MASK)
        D(bug("_cache_64bit_support: 64-bit access available\n"));
    else
        D(bug("_cache_64bit_support: 64-bit access not available\n"));
}