/*
 * Copyright © 2010-2015, The AROS Development Team. All rights reserved.
 *
 * This is an LRU copyback cache.
 *
 * The cache consists of a fixed number of cache blocks, each of which can
 * hold a contiguous range of disk blocks (each range is of a fixed
 * length). Each range is addressed by its base block number, calculated by
 * applying a mask to the requested block number.
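 *
 * For example, with the RANGE_SHIFT of 5 used below (32 disk blocks per
 * range), a request for disk block 69 resolves to the range with base
 * block 64 (69 & ~31), at offset 5 within that range.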
 *
 * Each cache block contains two Exec list nodes, so it can be part of two
 * lists simultaneously. The first node links a block into a hash table
 * list, while the second links it into either the free list or the dirty
 * list. Initially, all cache blocks are present in the free list, and are
 * absent from the hash table.
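 *
 * A cache block therefore looks roughly like the sketch below. The real
 * definition lives in cache.h; the field types shown are illustrative:
 *
 *     struct BlockRange
 *     {
 *         struct MinNode node1;      (hash table list)
 *         struct MinNode node2;      (free list or dirty list)
 *         ULONG          num;        (base block number of the range)
 *         ULONG          state;      (e.g. BS_VALID or BS_DIRTY)
 *         ULONG          use_count;  (references held by clients)
 *         UBYTE         *data;       (the cached disk data)
 *     };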
 *
 * When a disk block is requested by the client, the range that contains
 * that disk block is calculated. The range is then sought by looking it
 * up in the hash table. Each position in the hash table holds a list
 * containing all cache blocks (henceforth referred to as blocks:
 * individual disk blocks are not used internally) that map to that hash
 * location. Each list in the hash table is typically very short, so
 * look-up time is fast.
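 *
 * Specifically, the hash location for a range is computed as in
 * Cache_GetBlock() below:
 *
 *     hash_table[(blockNum >> RANGE_SHIFT) & (hash_size - 1)]
 *
 * which assumes that hash_size is a power of two.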
 *
 * If the requested range is not in the hash table, a cache block is
 * removed from the head of the free list and from any hash table list it
 * is in, and used to read the appropriate range from disk. This block is
 * then added to the hash table at its new location.
 *
 * Two elements are returned to the client when a requested block is found:
 * an opaque cache block handle, and a pointer to the data buffer.
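 *
 * Typical client usage is therefore as follows (a sketch; error handling
 * omitted):
 *
 *     UBYTE *data;
 *     APTR block = Cache_GetBlock(cache, block_num, &data);
 *     if(block != NULL)
 *     {
 *         ... read or modify the block's contents through 'data' ...
 *         Cache_MarkBlockDirty(cache, block);    (only if modified)
 *         Cache_FreeBlock(cache, block);
 *     }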
 *
 * When a range is freed, it remains in the hash table so that it can be
 * found quickly if needed again. Unless dirty, it is also added to the
 * tail of the free list through its second list node. The block then
 * remains in the hash table until it is reused for a different range.
 *
 * If a disk block is marked dirty by the client, the entire containing
 * range is marked dirty and added to the tail of the dirty list through
 * the cache block's second node. The dirty list is flushed periodically
 * (currently once per second), and additionally whenever the free list
 * is found to be empty.
 */
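
/*
 * The periodic flush is driven from outside this file; a handler
 * typically does something like the following once per second:
 *
 *     if(!Cache_Flush(cache))
 *         ... report the failed write and retry later ...
 */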

#include <aros/macros.h>
#include <proto/exec.h>
#include <proto/dos.h>
#include <clib/alib_protos.h>

#include "cache.h"

/* Make Exec and DOS calls use the library bases stored in the cache */
#define SysBase (c->sys_base)
#define DOSBase (c->dos_base)

/* Each range covers (1 << RANGE_SHIFT) disk blocks */
#define RANGE_SHIFT 5
#define RANGE_SIZE (1 << RANGE_SHIFT)
#define RANGE_MASK (RANGE_SIZE - 1)
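
/* Recover a BlockRange pointer from a pointer to its second list node:
 * the usual 'container_of' offset trick */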
#define NODE2(A) \
    ((struct BlockRange *)(((A) != NULL) ? \
    (((BYTE *)(A)) - (IPTR)&((struct BlockRange *)NULL)->node2) : NULL))

APTR Cache_CreateCache(APTR priv, ULONG hash_size, ULONG block_count,
    ULONG block_size, struct ExecBase *sys_base, struct DosLibrary *dos_base)
{
    struct Cache _c, *c = &_c;
    struct BlockRange *b;
    ULONG i;
    BOOL success = TRUE;

    /* Allocate cache structure. 'c' initially points to a stack dummy so
     * that the SysBase macro above works for this first allocation */

    c->sys_base = sys_base;
    if((c = AllocVec(sizeof(struct Cache), MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        success = FALSE;

    if(success)
    {
        c->priv = priv;
        c->sys_base = sys_base;
        c->dos_base = dos_base;
        c->block_size = block_size;
        c->block_count = block_count;
        c->hash_size = hash_size;

        /* Allocate hash table */

        c->hash_table = AllocVec(sizeof(struct MinList) * hash_size,
            MEMF_PUBLIC | MEMF_CLEAR);
        if(c->hash_table == NULL)
            success = FALSE;

        /* Initialise each hash location's list, and the free and dirty
         * lists */

        for(i = 0; i < hash_size && success; i++)
            NewList((struct List *)&c->hash_table[i]);
        NewList((struct List *)&c->free_list);
        NewList((struct List *)&c->dirty_list);

        /* Allocate cache blocks and add them to the free list */

        c->blocks = AllocVec(sizeof(APTR) * block_count,
            MEMF_PUBLIC | MEMF_CLEAR);
        if(c->blocks == NULL)
            success = FALSE;

        for(i = 0; i < block_count && success; i++)
        {
            b = AllocVec(sizeof(struct BlockRange)
                + (c->block_size << RANGE_SHIFT), MEMF_PUBLIC);
            if(b == NULL)
                success = FALSE;
            else
            {
                /* The data buffer follows the header in one allocation */
                b->data = (UBYTE *)b + sizeof(struct BlockRange);
                b->num = 0;
                b->use_count = 0;
                b->state = BS_EMPTY;

                c->blocks[i] = b;
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
            }
        }
    }

    if(!success)
    {
        Cache_DestroyCache(c);
        c = NULL;
    }

    return c;
}
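
/*
 * For instance, a handler might create a cache of 1024 blocks with a
 * 256-entry hash table (a sketch; the parameter choices are illustrative):
 *
 *     APTR cache = Cache_CreateCache(priv, 256, 1024, 512,
 *         SysBase, DOSBase);
 *     if(cache == NULL)
 *         return ERROR_NO_FREE_STORE;
 */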

VOID Cache_DestroyCache(APTR cache)
{
    struct Cache *c = cache;
    ULONG i;

    if(c != NULL)
    {
        /* The cache may be only partially initialised if creation failed */
        if(c->blocks != NULL)
        {
            for(i = 0; i < c->block_count; i++)
                FreeVec(c->blocks[i]);
            FreeVec(c->blocks);
        }
        FreeVec(c->hash_table);
        FreeVec(c);
    }
}

APTR Cache_GetBlock(APTR cache, ULONG blockNum, UBYTE **data)
{
    struct Cache *c = cache;
    struct BlockRange *b = NULL, *b2;
    LONG error = 0, data_offset;
    struct MinNode *n;
    struct MinList *l =
        &c->hash_table[(blockNum >> RANGE_SHIFT) & (c->hash_size - 1)];

    /* Change the block number to the start block of its range, and get the
     * byte offset within the range */

    data_offset = (blockNum - (blockNum & ~RANGE_MASK)) * c->block_size;
    blockNum &= ~RANGE_MASK;

    /* Check existing valid blocks first */

    ForeachNode(l, b2)
        if(b2->num == blockNum)
            b = b2;

    if(b != NULL)
    {
        /* Block found, so increment its usage count and remove it from the
         * free list if this was its first user */

        if(b->use_count++ == 0)
        {
            if(b->state != BS_DIRTY)
                Remove((struct Node *)&b->node2);
        }
    }
    else
    {
        /* Get a free buffer to read the block from disk */

        n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        if(n == NULL)
        {
            /* No free blocks, so flush dirty list to try and free up some
             * more blocks, then try again */

            Cache_Flush(c);
            n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        }

        if(n != NULL)
        {
            b = (struct BlockRange *)NODE2(n);

            /* Read the block from disk */

            if(AccessDisk(FALSE, blockNum, RANGE_SIZE, c->block_size,
                b->data, c->priv) == 0)
            {
                /* Remove block from its old position in the hash */

                if(b->state == BS_VALID)
                    Remove((struct Node *)&b->node1);

                /* Add it to the hash at the new location */

                AddHead((struct List *)l, (struct Node *)&b->node1);
                b->num = blockNum;
                b->state = BS_VALID;
                b->use_count = 1;
            }
            else
            {
                /* Read failed, so put the block back on the free list */

                AddHead((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
                b = NULL;
                error = ERROR_UNKNOWN;
            }
        }
        else
            error = ERROR_NO_FREE_STORE;
    }

    /* Set data pointer and error, and return cache block handle */

    SetIoErr(error);
    *data = b ? (b->data + data_offset) : NULL;

    return b;
}
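
/*
 * Blocks are evicted from the head of the free list and returned to its
 * tail, so the least recently used clean block is always reused first.
 */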

VOID Cache_FreeBlock(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    /* Decrement usage count */

    b->use_count--;

    /* Put an unused block at the end of the free list unless it's dirty */

    if(b->use_count == 0 && b->state != BS_DIRTY)
        AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);
}

VOID Cache_MarkBlockDirty(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    /* Mark the whole range dirty and queue it for the next flush, unless
     * this has already been done */

    if(b->state != BS_DIRTY)
    {
        b->state = BS_DIRTY;
        AddTail((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }
}

BOOL Cache_Flush(APTR cache)
{
    struct Cache *c = cache;
    LONG error = 0, td_error;
    struct MinNode *n;
    struct BlockRange *b;

    while((n = (struct MinNode *)RemHead((struct List *)&c->dirty_list))
        != NULL && error == 0)
    {
        /* Write dirty block range to disk */

        b = NODE2(n);
        td_error = AccessDisk(TRUE, b->num, RANGE_SIZE, c->block_size,
            b->data, c->priv);

        /* Transfer block range to free list if unused, or put back on dirty
         * list upon an error */

        if(td_error == 0)
        {
            b->state = BS_VALID;
            if(b->use_count == 0)
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
        }
        else
        {
            AddHead((struct List *)&c->dirty_list,
                (struct Node *)&b->node2);
            error = ERROR_UNKNOWN;
        }
    }

    return error == 0;
}
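
/*
 * Note that a failed write leaves the range at the head of the dirty list
 * and stops the flush, so a subsequent Cache_Flush() will retry it.
 */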