/* Scrape residue from the git web viewer — not part of the lru.c source:
 * "Added a test for MUIA_Listview_SelectChange."
 * [AROS.git] / rom / filesys / pfs3 / fs / lru.c
 * blob 71d5e82d65c88045a8935b692ee9c4d0d61ca6a7
 */
/* $Id$ */
/* $Log: lru.c $
 * Revision 10.21 1999/05/14 11:31:34 Michiel
 * Long filename support implemented; bugfixes
 *
 * Revision 10.20 1998/09/27 11:26:37 Michiel
 * ErrorMsg param
 *
 * Revision 10.19 1998/06/12 21:30:29 Michiel
 * Fixed bug 116: FreeResBlock
 *
 * Revision 10.18 1998/05/29 19:31:18 Michiel
 * fixed bug 108
 *
 * Revision 10.17 1997/03/03 22:04:04 Michiel
 * Release 16.21
 *
 * Revision 10.16 1995/11/15 15:52:10 Michiel
 * UpdateLE and UpdateLE_exa now call MakeLRU
 *
 * Revision 10.15 1995/11/07 15:00:58 Michiel
 * ResToBeFreed added
 * AllocLRU() changed for atomic update
 *
 * Revision 10.14 1995/10/04 14:05:41 Michiel
 * checking buffermemory against memorymask
 *
 * Revision 10.13 1995/09/01 11:22:53 Michiel
 * ErrorMsg adaption (see disk.c and volume.c)
 *
 * Revision 10.12 1995/07/28 07:58:58 Michiel
 * using SPECIAL_FLUSHED, needed for UpdateLE to recognize flushed
 * entries against deldirentries
 *
 * Revision 10.11 1995/07/21 06:53:54 Michiel
 * DELDIR adaptions
 *
 * Revision 10.10 1995/07/11 17:29:31 Michiel
 * ErrorMsg () calls use messages.c variables now.
 *
 * Revision 10.9 1995/06/23 17:27:53 Michiel
 * MIN_BUFFERS <= number of LRU buffers <= MAX_BUFFERS
 *
 * Revision 10.8 1995/02/28 18:35:32 Michiel
 * '12.6' bugfix in AllocLRU (direct write if dirty while updating)
 *
 * Revision 10.7 1995/02/15 16:43:39 Michiel
 * Release version
 * Using new headers (struct.h & blocks.h)
 *
 * Revision 10.6 1995/02/01 16:04:00 Michiel
 * UpdateLE_exa enforcer hit fixed
 *
 * Revision 10.5 1995/01/29 07:34:57 Michiel
 * Minbuffers now is a minimum, no longer an offset
 * ChechCache routine using hash table added.
 *
 * Revision 10.4 1995/01/18 04:29:34 Michiel
 * Bugfixes. Now ready for beta release.
 *
 * Revision 10.3 1994/11/15 17:48:34 Michiel
 * Flush block / UpdateReference bug fixed
 *
 * Revision 10.2 1994/10/27 11:32:46 Michiel
 * *** empty log message ***
 *
 * Revision 10.1 1994/10/24 11:16:28 Michiel
 * first RCS revision
 * */
#define __USE_SYSBASE

/* system headers */
#include <exec/memory.h>
#include <exec/lists.h>
#include <dos/filehandler.h>
#include <math.h>
#include <string.h>             /* memset (used by FlushBlock) */
#include <clib/alib_protos.h>

/* project headers */
#include "debug.h"
#include "blocks.h"
#include "struct.h"
#include "volume_protos.h"
#include "lru_protos.h"
#include "directory_protos.h"
#include "update_protos.h"
#include "disk_protos.h"
#include "allocation_protos.h"
/*
 * prototypes
 */
96 /* Allocate LRU queue
98 BOOL InitLRU (globaldata *g, UWORD reserved_blksize)
100 int i;
101 UBYTE *array;
103 ENTER("InitLRU");
105 if (g->glob_lrudata.LRUarray && g->glob_lrudata.reserved_blksize == reserved_blksize)
106 return TRUE;
108 DeallocLRU(g);
110 g->glob_lrudata.reserved_blksize = reserved_blksize;
112 NewList((struct List *)&g->glob_lrudata.LRUqueue);
113 NewList((struct List *)&g->glob_lrudata.LRUpool);
115 i = g->dosenvec->de_NumBuffers;
117 /* sanity checks. If HDToolbox default of 30, then 150,
118 * otherwise round in range 70 -- 600
120 if (i==30) i=150;
121 if (i<MIN_BUFFERS) i = MIN_BUFFERS;
122 if (i>MAX_BUFFERS) i = MAX_BUFFERS;
123 g->dosenvec->de_NumBuffers = g->glob_lrudata.poolsize = i;
124 g->uip = FALSE;
125 g->locknr = 1;
127 if (!(g->glob_lrudata.LRUarray = AllocVec((sizeof(struct lru_cachedblock) + reserved_blksize) * g->glob_lrudata.poolsize,
128 g->dosenvec->de_BufMemType | MEMF_CLEAR)))
129 return FALSE;
131 /* check memory against mask */
132 if (((SIPTR)g->glob_lrudata.LRUarray) & ~g->dosenvec->de_Mask)
133 ErrorMsg (AFS_WARNING_MEMORY_MASK, NULL, g);
135 array = (UBYTE *)g->glob_lrudata.LRUarray;
136 for(i=0;i<g->glob_lrudata.poolsize;i++)
137 MinAddHead(&g->glob_lrudata.LRUpool, array + i * (sizeof(struct lru_cachedblock) + reserved_blksize));
139 return TRUE;
142 void DeallocLRU(globaldata *g)
144 FreeVec (g->glob_lrudata.LRUarray);
145 g->glob_lrudata.LRUarray = NULL;
149 /* Allocate a block from the LRU chain and make
150 ** it current LRU.
151 ** Returns NULL if none available
153 struct cachedblock *AllocLRU (globaldata *g)
155 struct lru_cachedblock *lrunode;
156 ULONG error;
158 ENTER("AllocLRU");
160 if (g->glob_lrudata.LRUarray == NULL)
161 return NULL;
163 /* Use free block from pool or flush lru unused
164 ** block (there MUST be one!)
166 // retry:
167 if (IsMinListEmpty(&g->glob_lrudata.LRUpool))
169 for (lrunode = (struct lru_cachedblock *)g->glob_lrudata.LRUqueue.mlh_TailPred; lrunode->prev; lrunode = lrunode->prev)
171 /* skip locked blocks */
172 if (ISLOCKED(&lrunode->cblk))
173 continue;
175 if (lrunode->cblk.changeflag)
177 DB(Trace(1,"AllocLRU","ResToBeFreed %lx\n",&lrunode->cblk));
178 ResToBeFreed(lrunode->cblk.oldblocknr, g);
179 UpdateDatestamp(&lrunode->cblk, g);
180 error = RawWrite ((UBYTE *)&lrunode->cblk.data, RESCLUSTER, lrunode->cblk.blocknr, g);
181 if (error) {
182 ULONG args[2] = { lrunode->cblk.blocknr, error };
183 ErrorMsg (AFS_ERROR_LRU_UPDATE_FAIL, args, g);
187 FlushBlock(&lrunode->cblk, g);
188 goto ready;
191 else
193 lrunode = HeadOf(&g->glob_lrudata.LRUpool);
194 goto ready;
197 /* No suitable block found -> we are in trouble */
198 NormalErrorMsg (AFS_ERROR_OUT_OF_BUFFERS, NULL, 1);
199 return NULL;
201 ready:
202 MinRemove(lrunode);
203 MinAddHead(&g->glob_lrudata.LRUqueue, lrunode);
205 DB(Trace(1,"AllocLRU","Allocated block %lx\n", &lrunode->cblk));
207 // LOCK(&lrunode->cblk);
208 return &lrunode->cblk;
212 /* Adds a block to the ReservedToBeFreedCache
214 void ResToBeFreed(ULONG blocknr, globaldata *g)
216 /* bug 00116, 13 June 1998 */
217 if (blocknr)
219 /* check if cache has space left */
220 if (alloc_data.rtbf_index < alloc_data.rtbf_size)
222 alloc_data.reservedtobefreed[alloc_data.rtbf_index++] = blocknr;
224 else
226 /* reallocate cache */
227 ULONG newsize = alloc_data.rtbf_size ? alloc_data.rtbf_size * 2 : RTBF_CACHE_SIZE;
228 ULONG *newbuffer = AllocMem(sizeof(*newbuffer) * newsize, MEMF_ANY);
229 if (newbuffer)
231 if (alloc_data.reservedtobefreed)
233 CopyMem(alloc_data.reservedtobefreed, newbuffer, sizeof(*newbuffer) * alloc_data.rtbf_index);
234 FreeMem(alloc_data.reservedtobefreed, sizeof(*newbuffer) * alloc_data.rtbf_size);
236 alloc_data.reservedtobefreed = newbuffer;
237 alloc_data.rtbf_size = newsize;
238 alloc_data.reservedtobefreed[alloc_data.rtbf_index++] = blocknr;
239 return;
242 /* this should never happen */
243 DB(Trace(10,"ResToBeFreed","reserved to be freed cache full\n"));
244 #ifdef BETAVERSION
245 ErrorMsg (AFS_BETA_WARNING_1, NULL, g);
246 #endif
247 /* hope nobody allocates this block before the disk has been
248 * updated
250 FreeReservedBlock (blocknr, g);
256 /* Makes a cached block ready for reuse:
257 ** - Remove from queue
258 ** - (dirblock) Decouple all references to the block
259 ** - wipe memory
260 ** NOTE: NOT REMOVED FROM LRU!
262 void FlushBlock (struct cachedblock *block, globaldata *g)
264 lockentry_t *le;
266 DB(Trace(10,"FlushBlock","Flushing block %lx\n", block->blocknr));
268 /* remove block from blockqueue */
269 MinRemove(block);
271 /* decouple references */
272 if (IsDirBlock(block))
274 /* check fileinfo references */
275 for (le = (lockentry_t *)HeadOf(&block->volume->fileentries); le->le.next; le = (lockentry_t *)le->le.next)
277 /* only dirs and files have fileinfos that need to be updated,
278 ** but the volume * pointer of volumeinfos never points to
279 ** a cached block, so the type != ETF_VOLUME check is not
280 ** necessary. Just check the dirblockpointer
282 if (le->le.info.file.dirblock == (struct cdirblock *)block)
284 le->le.dirblocknr = block->blocknr;
285 le->le.dirblockoffset = (UBYTE *)le->le.info.file.direntry - (UBYTE *)block;
286 #if DELDIR
287 le->le.info.deldir.special = SPECIAL_FLUSHED; /* flushed reference */
288 #else
289 le->le.info.direntry = NULL;
290 #endif
291 le->le.info.file.dirblock = NULL;
294 /* exnext references */
295 if (le->le.type.flags.dir && le->nextentry.dirblock == (struct cdirblock *)block)
297 le->nextdirblocknr = block->blocknr;
298 le->nextdirblockoffset = (UBYTE *)le->nextentry.direntry - (UBYTE *)block;
299 #if DELDIR
300 le->nextentry.direntry = (struct direntry *)SPECIAL_FLUSHED;
301 #else
302 le->nextentry.direntry = NULL;
303 #endif
304 le->nextentry.dirblock = NULL;
309 /* wipe memory */
310 memset(block, 0, SIZEOF_CACHEDBLOCK);
313 /* updates references of listentries to dirblock
315 void UpdateReference (ULONG blocknr, struct cdirblock *blk, globaldata *g)
317 lockentry_t *le;
319 DB(Trace(1,"UpdateReference","block %lx\n", blocknr));
321 for (le = (lockentry_t *)HeadOf(&blk->volume->fileentries); le->le.next; le = (lockentry_t *)le->le.next)
323 /* ignoring the fact that not all objectinfos are fileinfos, but the
324 ** 'volumeinfo.volume' and 'deldirinfo.deldir' fields never are NULL anyway, so ...
325 ** maybe better to check for SPECIAL_FLUSHED
327 if (le->le.info.file.dirblock == NULL && le->le.dirblocknr == blocknr)
329 le->le.info.file.dirblock = blk;
330 le->le.info.file.direntry = (struct direntry *)((UBYTE *)blk + le->le.dirblockoffset);
331 le->le.dirblocknr =
332 le->le.dirblockoffset = 0;
335 /* exnext references */
336 if (le->le.type.flags.dir && le->nextdirblocknr == blocknr)
338 le->nextentry.dirblock = blk;
339 le->nextentry.direntry = (struct direntry *)((UBYTE *)blk + le->nextdirblockoffset);
340 le->nextdirblocknr =
341 le->nextdirblockoffset = 0;
346 /* Updates objectinfo of a listentry (if necessary)
347 * This function only reloads the flushed directory block referred to. The
348 * load directory block routine will actually restore the reference.
350 void UpdateLE (listentry_t *le, globaldata *g)
352 //DB(Trace(1,"UpdateLE","Listentry %lx\n", le));
354 /* don't update volumeentries or deldirs!! */
355 #if DELDIR
356 if (!le || le->info.deldir.special <= SPECIAL_DELFILE)
357 #else
358 if (!le || IsVolumeEntry(le))
359 #endif
360 return;
362 if (le->dirblocknr)
363 LoadDirBlock (le->dirblocknr, g);
365 MakeLRU (le->info.file.dirblock);
366 LOCK(le->info.file.dirblock);
369 void UpdateLE_exa (lockentry_t *le, globaldata *g)
371 //DB(Trace(1,"UpdateLE_exa","LE %lx\n", le));
373 if (!le) return;
375 if (le->le.type.flags.dir)
377 #if DELDIR
378 if (IsDelDir(le->le.info))
379 return;
380 #endif
382 if (le->nextdirblocknr)
383 LoadDirBlock (le->nextdirblocknr, g);
385 if (le->nextentry.dirblock)
387 MakeLRU (le->nextentry.dirblock);
388 LOCK(le->nextentry.dirblock);
394 * Cache check ..
395 * The 'mask' is used as a fast modulo operator for the hash table size.
398 struct cachedblock *CheckCache (struct MinList *list, UWORD mask, ULONG blocknr, globaldata *g)
400 struct cachedblock *block;
402 for (block = HeadOf(&list[(blocknr/2)&mask]); block->next; block=block->next)
404 if (block->blocknr == blocknr)
406 MakeLRU(block);
407 return block;
411 return NULL;