/*
 * Disk buffer cache
 *
 * Copyright © 2007 Robert Norris
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the same terms as AROS itself.
 *
 * $Id$
 */

/*
 * This is an attempt at a generic buffer cache for filesystems. It's a
 * traditional cache of the type described in Tanenbaum 2nd Ed. Eventually I
 * hope it will be a system-wide facility (eg cache.resource), but for the
 * moment it's just part of fat.handler.
 *
 * The idea is that we have a pile of blocks (struct cache_block), a hashtable
 * and a free list. Initially all the blocks are free, and reside on the free
 * list (a doubly-linked list). The hashtable is empty.
 *
 * When a request for a block comes in (cache_get_block()), we extract the
 * bottom N bits of the block number, and then scan the linked list in that
 * cache bucket for the block. If it's there, the use count for the block is
 * incremented and the block is returned.
 *
 * If not found, then we check the free list. The free list is stored in order
 * of use time, with the least-recently-used block at the front (head) and the
 * most-recently-used block at the back (tail). We scan the free list from
 * back to front. A block that was used recently is likely to be used again in
 * the future, so this is the fastest way to find it. Once found, its use
 * count is incremented and it's re-inserted into the appropriate hash
 * bucket/list, then returned to the caller.
 *
 * If the block isn't found in the free list, then we don't have it anywhere,
 * and have to load it from disk. We take an empty buffer from the beginning
 * of the free list (that is, the oldest unused block available). If it's
 * dirty (ie was written to), we write it out, along with all the other dirty
 * blocks for the device/unit (to improve the odds of the filesystem remaining
 * consistent). Then we load the block from the disk into the buffer, fiddle
 * with the block metadata, add it to the hash and return it.
 *
 * When the caller is finished with the block, it calls cache_put_block() to
 * return the block to the cache. The block's use count is decremented. If it's
 * still >0 (ie the block is in use), then the function just returns. If not,
 * it removes the block from the hash and adds it to the end of the free list.
 *
 * A task periodically scans the free list for dirty buffers, and writes them
 * out to disk. The usage sketch below shows how the pieces fit together.
 */
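
/*
 * A minimal usage sketch (not part of the handler; the parameter values here
 * are hypothetical). A filesystem would typically create one cache per
 * device/unit, look blocks up by number, and return them when done:
 *
 *     struct cache *c;
 *     struct cache_block *b;
 *
 *     // 64 hash buckets, 256 blocks of 512 bytes, write-through mode
 *     c = cache_new(req, 64, 256, 512, CACHE_WRITETHROUGH);
 *
 *     if (cache_get_block(c, 1234, 0, &b) == 0) {
 *         // b->data is a block_size-byte buffer holding block 1234.
 *         // After modifying it, mark it dirty so it reaches the disk
 *         // (in write-through mode this writes it immediately):
 *         b->data[0] = 0xff;
 *         cache_mark_block_dirty(c, b);
 *
 *         cache_put_block(c, b, 0);
 *     }
 *
 *     cache_flush(c);    // write out anything still dirty (writeback mode)
 *     cache_free(c);
 *
 * Note that hash_size is assumed to be a power of two: the bucket for block
 * num is (num & (hash_size-1)), so with hash_size 64 block 1234 lands in
 * bucket 1234 & 0x3f == 18.
 */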

#include <exec/types.h>
#include <exec/nodes.h>
#include <exec/lists.h>
#include <exec/io.h>
#include <exec/ports.h>
#include <exec/errors.h>
#include <dos/dos.h>
#include <devices/trackdisk.h>
#include <devices/newstyle.h>

#include <proto/exec.h>

#include "cache.h"

#define DEBUG 0
#include <aros/debug.h>

/* TD64 commands */
#ifndef TD_READ64
# define TD_READ64  24
# define TD_WRITE64 25
#endif

/* prototypes for lowlevel block functions */
static ULONG _cache_do_blocks_ll(struct cache *c, BOOL do_write, ULONG num, ULONG nblocks, ULONG block_size, UBYTE *data);
static void _cache_64bit_support(struct cache *c);

struct cache *cache_new(struct IOStdReq *req, ULONG hash_size, ULONG num_blocks, ULONG block_size, ULONG flags) {
    struct cache *c;
    ULONG i;

    D(bug("cache_new: hash_size %ld num_blocks %ld block_size %ld flags 0x%08x\n", hash_size, num_blocks, block_size, flags));

    if ((c = (struct cache *) AllocVec(sizeof(struct cache), MEMF_PUBLIC)) == NULL)
        return NULL;

    c->req = req;

    /* hash_size is assumed to be a power of two, so that (num & hash_mask)
     * selects a bucket */
    c->hash_size = hash_size;
    c->hash_mask = hash_size-1;

    c->block_size = block_size;

    c->flags = CACHE_WRITETHROUGH;
    if ((flags & (CACHE_WRITEBACK | CACHE_WRITETHROUGH)) == CACHE_WRITEBACK)
        c->flags = CACHE_WRITEBACK;

    _cache_64bit_support(c);

    c->num_blocks = num_blocks;
    c->num_in_use = 0;

    c->blocks = (struct cache_block **) AllocVec(sizeof(struct cache_block *) * num_blocks, MEMF_PUBLIC);

    c->hits = c->misses = 0;

    for (i = 0; i < num_blocks; i++) {
        struct cache_block *b;

        b = (struct cache_block *) AllocVec(sizeof(struct cache_block) + block_size, MEMF_PUBLIC);
        b->use_count = 0;
        b->is_dirty = FALSE;
        b->num = 0;
        b->data = (UBYTE *) b + sizeof(struct cache_block);

        b->hash_next = b->hash_prev = NULL;

        c->blocks[i] = b;

        /* link the block onto the tail of the free list */
        b->free_next = NULL;
        if (i == 0)
            b->free_prev = NULL;
        else {
            b->free_prev = c->blocks[i-1];
            c->blocks[i-1]->free_next = b;
        }
    }

    c->free_head = c->blocks[0];
    c->free_tail = c->blocks[num_blocks-1];

    c->hash = (struct cache_block **) AllocVec(sizeof(struct cache_block *) * hash_size, MEMF_PUBLIC | MEMF_CLEAR);

    c->dirty = NULL;

    return c;
}

void cache_free(struct cache *c) {
    ULONG i;

    cache_flush(c);

    for (i = 0; i < c->num_blocks; i++)
        FreeVec(c->blocks[i]);
    FreeVec(c->blocks);
    FreeVec(c->hash);
    FreeVec(c);
}

ULONG cache_get_block(struct cache *c, ULONG num, ULONG flags, struct cache_block **rb) {
    struct cache_block *b;
    ULONG err;

    D(bug("cache_get_block: looking for block %d, flags 0x%08x\n", num, flags));

    /* first check the in-use blocks */
    for (b = c->hash[num & c->hash_mask]; b != NULL; b = b->hash_next)
        if (b->num == num) {
            b->use_count++;

            D(bug("cache_get_block: found in-use block, use count is now %d\n", b->use_count));
            c->hits++;

            *rb = b;
            return 0;
        }

    D(bug("cache_get_block: in-use block not found, checking free list\n"));

    /* not there, check the recently-freed list. recently used stuff is placed
     * at the back, so we start there */
    for (b = c->free_tail; b != NULL; b = b->free_prev) {
        if (b->num == num) {
            D(bug("cache_get_block: found it in the free list\n"));
            c->hits++;

            /* remove it from the free list */
            if (b->free_prev != NULL)
                b->free_prev->free_next = b->free_next;
            else
                c->free_head = b->free_next;

            if (b->free_next != NULL)
                b->free_next->free_prev = b->free_prev;
            else
                c->free_tail = b->free_prev;

            b->free_prev = b->free_next = NULL;

            /* place it at the front of the hash */
            b->hash_prev = NULL;
            b->hash_next = c->hash[num & c->hash_mask];
            if (b->hash_next != NULL)
                b->hash_next->hash_prev = b;
            c->hash[num & c->hash_mask] = b;

            /* if it's dirty, flush everything */
            if (b->is_dirty)
                cache_flush(c);

            b->use_count = 1;
            c->num_in_use++;

            D(bug("cache_get_block: total %d blocks in use\n", c->num_in_use));

            *rb = b;
            return 0;
        }
    }

    D(bug("cache_get_block: not found, loading from disk\n"));
    c->misses++;

    /* gotta read it from disk. get an empty buffer */
    b = c->free_head;
    if (b == NULL) {
        D(bug("cache_get_block: no free blocks left\n"));
        return ERROR_NO_FREE_STORE;
    }

    /* detach it */
    if (c->free_head == c->free_tail)
        c->free_head = c->free_tail = NULL;
    else {
        c->free_head = b->free_next;
        if (c->free_head != NULL)
            c->free_head->free_prev = NULL;
    }

    b->free_prev = b->free_next = NULL;

    /* write it out if it's dirty */
    if (b->is_dirty)
        cache_flush(c);

    /* read the block from disk */
    if ((err = _cache_do_blocks_ll(c, FALSE, num, 1, c->block_size, b->data)) != 0) {
        /* read failed, put the block back on the free list, keeping the
         * back links and tail pointer consistent */
        b->free_next = c->free_head;
        if (b->free_next != NULL)
            b->free_next->free_prev = b;
        c->free_head = b;
        if (c->free_tail == NULL)
            c->free_tail = b;

        /* and bail */
        *rb = NULL;
        return err;
    }

    /* setup the rest of it */
    b->num = num;
    b->is_dirty = FALSE;

    /* add it to the hash */
    b->hash_next = c->hash[num & c->hash_mask];
    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b;
    c->hash[num & c->hash_mask] = b;

    b->use_count = 1;
    c->num_in_use++;

    D(bug("cache_get_block: loaded from disk, total %d blocks in use\n", c->num_in_use));

    *rb = b;
    return 0;
}

ULONG cache_put_block(struct cache *c, struct cache_block *b, ULONG flags) {
    D(bug("cache_put_block: returning block %d, flags 0x%08x\n", b->num, flags));

    /* if it's still in use, then we've got it easy */
    b->use_count--;
    if (b->use_count != 0) {
        D(bug("cache_put_block: new use count is %d, nothing else to do\n", b->use_count));
        return 0;
    }

    /* no longer in use, remove it from its hash */
    if (b->hash_prev != NULL)
        b->hash_prev->hash_next = b->hash_next;
    else
        c->hash[b->num & c->hash_mask] = b->hash_next;

    if (b->hash_next != NULL)
        b->hash_next->hash_prev = b->hash_prev;

    b->hash_prev = b->hash_next = NULL;

    /* put it at the end of the free list */
    b->free_next = NULL;
    b->free_prev = c->free_tail;
    if (b->free_prev != NULL)
        b->free_prev->free_next = b;
    c->free_tail = b;
    if (c->free_head == NULL)
        c->free_head = b;

    /* one down */
    c->num_in_use--;

    D(bug("cache_put_block: no longer in use, moved to free list, total %ld blocks in use\n", c->num_in_use));

    return 0;
}

ULONG cache_get_blocks(struct cache *c, ULONG num, ULONG nblocks, ULONG flags, struct cache_block **rb) {
    ULONG err, i;

    D(bug("cache_get_blocks: loading %d blocks starting at %d, flags 0x%08x\n", nblocks, num, flags));

    /* XXX optimise this to get contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        if ((err = cache_get_block(c, num+i, flags, &rb[i])) != 0) {
            D(bug("cache_get_blocks: block load failed, freeing everything we got so far\n"));

            /* i is unsigned, so count down from the last block that actually
             * loaded (block i itself was never returned to us) */
            while (i-- > 0)
                cache_put_block(c, rb[i], 0);

            return err;
        }
    }

    return 0;
}

ULONG cache_put_blocks(struct cache *c, struct cache_block **b, ULONG nblocks, ULONG flags) {
    ULONG i;

    D(bug("cache_put_blocks: returning %d blocks starting at %d, flags 0x%08x\n", nblocks, b[0]->num, flags));

    for (i = 0; i < nblocks; i++)
        cache_put_block(c, b[i], 0);

    return 0;
}

ULONG cache_mark_block_dirty(struct cache *c, struct cache_block *b) {
    ULONG err;

    /* writeback mode: just queue the block for the next flush */
    if (c->flags & CACHE_WRITEBACK) {
        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n", b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n", b->num));

        return 0;
    }

    /* writethrough mode: write the block immediately */
    D(bug("cache_mark_block_dirty: writing dirty block %d\n", b->num));

    if ((err = _cache_do_blocks_ll(c, TRUE, b->num, 1, c->block_size, b->data)) != 0) {
        D(bug("cache_mark_block_dirty: write failed, leaving block marked dirty\n"));

        if (!b->is_dirty) {
            D(bug("cache_mark_block_dirty: adding block %d to dirty list\n", b->num));

            b->dirty_next = c->dirty;
            c->dirty = b;

            b->is_dirty = TRUE;
        }
        else
            D(bug("cache_mark_block_dirty: block %d already dirty\n", b->num));

        return err;
    }

    return 0;
}

ULONG cache_mark_blocks_dirty(struct cache *c, struct cache_block **b, ULONG nblocks) {
    ULONG err, i;

    /* writeback mode: just queue the blocks for the next flush */
    if (c->flags & CACHE_WRITEBACK) {
        for (i = 0; i < nblocks; i++) {
            if (!b[i]->is_dirty) {
                D(bug("cache_mark_blocks_dirty: adding block %d to dirty list\n", b[i]->num));

                b[i]->dirty_next = c->dirty;
                c->dirty = b[i];

                b[i]->is_dirty = TRUE;
            }
            else
                D(bug("cache_mark_blocks_dirty: block %d already dirty\n", b[i]->num));
        }

        return 0;
    }

    /* writethrough mode: write the blocks immediately */
    /* XXX optimise this to do contiguous blocks in one hit */
    for (i = 0; i < nblocks; i++) {
        D(bug("cache_mark_blocks_dirty: writing dirty block %d\n", b[i]->num));

        if ((err = _cache_do_blocks_ll(c, TRUE, b[i]->num, 1, c->block_size, b[i]->data)) != 0) {
            D(bug("cache_mark_blocks_dirty: write failed, leaving this and remaining blocks marked dirty\n"));

            for (; i < nblocks; i++) {
                if (!b[i]->is_dirty) {
                    D(bug("cache_mark_blocks_dirty: adding block %d to dirty list\n", b[i]->num));

                    b[i]->dirty_next = c->dirty;
                    c->dirty = b[i];

                    b[i]->is_dirty = TRUE;
                }
                else
                    D(bug("cache_mark_blocks_dirty: block %d already dirty\n", b[i]->num));
            }

            return err;
        }
    }

    return 0;
}

ULONG cache_flush(struct cache *c) {
    struct cache_block *b, *next;
    ULONG err = 0;    /* an empty dirty list is not an error */
    int count = 0;

    D(bug("cache_flush: flushing dirty blocks\n"));

    b = c->dirty;
    while (b != NULL) {
        next = b->dirty_next;

        if ((err = _cache_do_blocks_ll(c, TRUE, b->num, 1, c->block_size, b->data)) != 0) {
            D(bug("cache_flush: write failed, leaving this and remaining blocks marked dirty\n"));
            break;
        }

        /* the write succeeded, so the block is clean again */
        b->is_dirty = FALSE;
        b->dirty_next = NULL;

        count++;
        b = next;
    }

    /* whatever is left (NULL if every write succeeded) stays dirty */
    c->dirty = b;

    D(bug("cache_flush: %d blocks flushed\n", count));

    return err;
}

void cache_stats(struct cache *c) {
    struct cache_block *b;
    ULONG count, i;

    kprintf("[cache] statistics for cache 0x%08x\n", c);

    kprintf("    %ld hash buckets (mask 0x%x)\n", c->hash_size, c->hash_mask);
    kprintf("    block size: %ld bytes\n", c->block_size);
    kprintf("    total blocks: %ld\n", c->num_blocks);
    kprintf("    flags:%s%s%s%s%s\n", c->flags & CACHE_WRITETHROUGH ? " CACHE_WRITETHROUGH" : "",
                                      c->flags & CACHE_WRITEBACK    ? " CACHE_WRITEBACK"    : "",
                                      c->flags & CACHE_64_TD64      ? " CACHE_64_TD64"      : "",
                                      c->flags & CACHE_64_NSD       ? " CACHE_64_NSD"       : "",
                                      c->flags & CACHE_64_SCSI      ? " CACHE_64_SCSI"      : "");
    kprintf("    total blocks in use: %ld\n", c->num_in_use);

    count = 0;
    for (i = 0; i < c->hash_size; i++)
        for (b = c->hash[i]; b != NULL; b = b->hash_next)
            count++;

    if (count != c->num_in_use)
        kprintf("WARNING: in-use count (%ld) doesn't match hash contents (%ld)\n", c->num_in_use, count);

    count = 0;
    for (b = c->free_head; b != NULL; b = b->free_next)
        count++;

    kprintf("    blocks on free list: %ld\n", count);

    count = 0;
    for (b = c->dirty; b != NULL; b = b->dirty_next)
        count++;

    kprintf("    blocks on dirty list: %ld\n", count);

    kprintf("    hits: %ld    misses: %ld\n", c->hits, c->misses);
}

/* lowlevel block function */
static ULONG _cache_do_blocks_ll(struct cache *c, BOOL do_write, ULONG num, ULONG nblocks, ULONG block_size, UBYTE *data) {
    ULONG err;
    UQUAD off;
    ULONG low, high;
    ULONG length;
    BOOL want_64 = FALSE;

    D(bug("_cache_do_blocks_ll: request to %s %ld blocks starting from %ld (block_size %ld)\n", do_write ? "write" : "read", nblocks, num, block_size));

    /* compute the 64-bit byte offset on the device (blocks are addressed
     * in 512-byte units here) and split it for the io request */
    off = ((UQUAD) num) * 512;

    low = off & 0xffffffff;
    high = off >> 32;

    length = nblocks * block_size;
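
    /*
     * A worked example of the 64-bit test below (illustrative numbers only):
     * block num 0x900000 gives off = 0x900000 * 512 = 0x120000000, which
     * splits into high = 0x1 and low = 0x20000000, so plain CMD_READ/CMD_WRITE
     * (with only the 32-bit io_Offset) can't reach it and a 64-bit command is
     * required. The "low + length < length" clause catches the other case,
     * where the transfer starts below 4GB but wraps past it: the 32-bit sum
     * overflows and comes out smaller than length.
     */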
    if (high > 0 || low + length < length) {
        if (!(c->flags & CACHE_64_MASK)) {
            D(bug("_cache_do_blocks_ll: 64-bit operation requested but underlying device doesn't support it\n"));
            return IOERR_NOCMD;
        }

        want_64 = TRUE;
    }

    /* XXX support DirectSCSI */

    c->req->io_Offset = low;
    c->req->io_Actual = high;
    c->req->io_Length = nblocks * block_size;
    c->req->io_Data = data;
    c->req->io_Flags = IOF_QUICK;

    if (!want_64)
        c->req->io_Command = do_write ? CMD_WRITE : CMD_READ;
    else if (c->flags & CACHE_64_TD64)
        c->req->io_Command = do_write ? TD_WRITE64 : TD_READ64;
    else if (c->flags & CACHE_64_NSD)
        c->req->io_Command = do_write ? NSCMD_TD_WRITE64 : NSCMD_TD_READ64;

    DoIO((struct IORequest *) c->req);

    err = c->req->io_Error;

    D(bug("_cache_do_blocks_ll: returning error %ld\n", err));

    return err;
}

/* probe the device to determine 64-bit support */
static void _cache_64bit_support(struct cache *c) {
    struct NSDeviceQueryResult nsd_query;
    UWORD *nsd_cmd;

    /* probe TD64 */
    c->req->io_Command = TD_READ64;
    c->req->io_Offset = 0;
    c->req->io_Length = 0;
    c->req->io_Actual = 0;
    c->req->io_Data = 0;
    c->req->io_Flags = IOF_QUICK;

    if (DoIO((struct IORequest *) c->req) == 0 && c->req->io_Error != IOERR_NOCMD) {
        D(bug("_cache_64bit_support: device supports 64-bit trackdisk extensions\n"));
        c->flags |= CACHE_64_TD64;
    }

    /* probe NSD */
    c->req->io_Command = NSCMD_DEVICEQUERY;
    c->req->io_Length = sizeof(struct NSDeviceQueryResult);
    c->req->io_Data = (APTR) &nsd_query;

    if (DoIO((struct IORequest *) c->req) == 0 && c->req->io_Error != IOERR_NOCMD)
        for (nsd_cmd = nsd_query.SupportedCommands; *nsd_cmd != 0; nsd_cmd++) {
            if (*nsd_cmd == NSCMD_TD_READ64) {
                D(bug("_cache_64bit_support: device supports NSD 64-bit trackdisk extensions\n"));
                c->flags |= CACHE_64_NSD;
                break;
            }
        }

    /* XXX probe DirectSCSI */

    if (c->flags & CACHE_64_MASK)
        D(bug("_cache_64bit_support: 64-bit access available\n"));
    else
        D(bug("_cache_64bit_support: 64-bit access not available\n"));
}