 * Revision 10.21  1999/05/14 11:31:34  Michiel
 * Long filename support implemented; bugfixes
 *
 * Revision 10.20  1998/09/27 11:26:37  Michiel
 *
 * Revision 10.19  1998/06/12 21:30:29  Michiel
 * Fixed bug 116: FreeResBlock
 *
 * Revision 10.18  1998/05/29 19:31:18  Michiel
 *
 * Revision 10.17  1997/03/03 22:04:04  Michiel
 *
 * Revision 10.16  1995/11/15 15:52:10  Michiel
 * UpdateLE and UpdateLE_exa now call MakeLRU
 *
 * Revision 10.15  1995/11/07 15:00:58  Michiel
 * AllocLRU() changed for atomic update
 *
 * Revision 10.14  1995/10/04 14:05:41  Michiel
 * checking buffermemory against memorymask
 *
 * Revision 10.13  1995/09/01 11:22:53  Michiel
 * ErrorMsg adaption (see disk.c and volume.c)
 *
 * Revision 10.12  1995/07/28 07:58:58  Michiel
 * using SPECIAL_FLUSHED, needed for UpdateLE to recognize flushed
 * entries against deldirentries
 *
 * Revision 10.11  1995/07/21 06:53:54  Michiel
 *
 * Revision 10.10  1995/07/11 17:29:31  Michiel
 * ErrorMsg () calls use messages.c variables now.
 *
 * Revision 10.9  1995/06/23 17:27:53  Michiel
 * MIN_BUFFERS <= number of LRU buffers <= MAX_BUFFERS
 *
 * Revision 10.8  1995/02/28 18:35:32  Michiel
 * '12.6' bugfix in AllocLRU (direct write if dirty while updating)
 *
 * Revision 10.7  1995/02/15 16:43:39  Michiel
 * Using new headers (struct.h & blocks.h)
 *
 * Revision 10.6  1995/02/01 16:04:00  Michiel
 * UpdateLE_exa enforcer hit fixed
 *
 * Revision 10.5  1995/01/29 07:34:57  Michiel
 * Minbuffers now is a minimum, no longer an offset
 * CheckCache routine using hash table added.
 *
 * Revision 10.4  1995/01/18 04:29:34  Michiel
 * Bugfixes. Now ready for beta release.
 *
 * Revision 10.3  1994/11/15 17:48:34  Michiel
 * Flush block / UpdateReference bug fixed
 *
 * Revision 10.2  1994/10/27 11:32:46  Michiel
 * *** empty log message ***
 *
 * Revision 10.1  1994/10/24 11:16:28  Michiel
#include <exec/memory.h>
#include <exec/lists.h>
#include <dos/filehandler.h>
#include <clib/alib_protos.h>

#include "volume_protos.h"
#include "lru_protos.h"
#include "directory_protos.h"
#include "update_protos.h"
#include "disk_protos.h"
#include "allocation_protos.h"
BOOL InitLRU (globaldata *g, UWORD reserved_blksize)
{
	ULONG i;
	UBYTE *array;

	/* already initialised with the same blocksize? */
	if (g->glob_lrudata.LRUarray && g->glob_lrudata.reserved_blksize == reserved_blksize)
		return TRUE;

	g->glob_lrudata.reserved_blksize = reserved_blksize;
	NewList((struct List *)&g->glob_lrudata.LRUqueue);
	NewList((struct List *)&g->glob_lrudata.LRUpool);

	i = g->dosenvec->de_NumBuffers;

	/* sanity checks. If HDToolbox default of 30, then 150,
	** otherwise round in range 70 -- 600
	*/
	if (i == 30) i = 150;
	if (i < MIN_BUFFERS) i = MIN_BUFFERS;
	if (i > MAX_BUFFERS) i = MAX_BUFFERS;
	g->dosenvec->de_NumBuffers = g->glob_lrudata.poolsize = i;

	/* one allocation for all cache slots: header plus one reserved block each */
	if (!(g->glob_lrudata.LRUarray = AllocVec((sizeof(struct lru_cachedblock) + reserved_blksize) * g->glob_lrudata.poolsize,
			g->dosenvec->de_BufMemType | MEMF_CLEAR)))
		return FALSE;

	/* check memory against mask */
	if (((SIPTR)g->glob_lrudata.LRUarray) & ~g->dosenvec->de_Mask)
		ErrorMsg (AFS_WARNING_MEMORY_MASK, NULL, g);

	array = (UBYTE *)g->glob_lrudata.LRUarray;
	for (i = 0; i < g->glob_lrudata.poolsize; i++)
		MinAddHead(&g->glob_lrudata.LRUpool, array + i * (sizeof(struct lru_cachedblock) + reserved_blksize));

	return TRUE;
}
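/* Sizing note: the cache is a single AllocVec allocation of poolsize slots,
 * each holding a struct lru_cachedblock header followed by one reserved block.
 * For example, with poolsize = 150 and a 1024 byte reserved blocksize this is
 * roughly 150 * (1024 + sizeof(struct lru_cachedblock)) bytes of de_BufMemType
 * memory (the example numbers are illustrative, not taken from this file).
 */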
void DeallocLRU(globaldata *g)
{
	FreeVec (g->glob_lrudata.LRUarray);
	g->glob_lrudata.LRUarray = NULL;
}
/* Allocate a block from the LRU chain and make it the most recently used.
** Returns NULL if none available
*/
struct cachedblock *AllocLRU (globaldata *g)
{
	struct lru_cachedblock *lrunode;
	ULONG error;

	if (g->glob_lrudata.LRUarray == NULL)
		return NULL;

	/* Use free block from pool or flush lru unused
	** block (there MUST be one!)
	*/
	if (IsMinListEmpty(&g->glob_lrudata.LRUpool))
	{
		for (lrunode = (struct lru_cachedblock *)g->glob_lrudata.LRUqueue.mlh_TailPred; lrunode->prev; lrunode = lrunode->prev)
		{
			/* skip locked blocks */
			if (ISLOCKED(&lrunode->cblk))
				continue;

			/* dirty blocks are written back before being reused */
			if (lrunode->cblk.changeflag)
			{
				DB(Trace(1,"AllocLRU","ResToBeFreed %lx\n",&lrunode->cblk));
				ResToBeFreed(lrunode->cblk.oldblocknr, g);
				UpdateDatestamp(&lrunode->cblk, g);
				error = RawWrite ((UBYTE *)&lrunode->cblk.data, RESCLUSTER, lrunode->cblk.blocknr, g);
				if (error)
				{
					ULONG args[2] = { lrunode->cblk.blocknr, error };
					ErrorMsg (AFS_ERROR_LRU_UPDATE_FAIL, args, g);
				}
			}

			FlushBlock(&lrunode->cblk, g);
			goto ready;
		}
	}
	else
	{
		lrunode = HeadOf(&g->glob_lrudata.LRUpool);
		goto ready;
	}

	/* No suitable block found -> we are in trouble */
	NormalErrorMsg (AFS_ERROR_OUT_OF_BUFFERS, NULL, 1);
	return NULL;

ready:
	/* make it the most recently used block */
	MinRemove(lrunode);
	MinAddHead(&g->glob_lrudata.LRUqueue, lrunode);

	DB(Trace(1,"AllocLRU","Allocated block %lx\n", &lrunode->cblk));
	// LOCK(&lrunode->cblk);
	return &lrunode->cblk;
}
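/* Illustrative sketch (hypothetical caller, not part of this file): a routine
 * that needs a fresh reserved cache block would typically do something like
 *
 *	struct cachedblock *blk;
 *
 *	if (!(blk = AllocLRU(g)))
 *		return NULL;			// out of buffers
 *	blk->volume  = volume;		// assumed field usage; see struct cachedblock
 *	blk->blocknr = blocknr;
 *	blk->changeflag = TRUE;		// dirty: written back when the slot is reused
 *
 * i.e. AllocLRU only hands out the least recently used slot; filling in the
 * block header is the caller's job.
 */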
/* Adds a block to the ReservedToBeFreedCache
*/
void ResToBeFreed(ULONG blocknr, globaldata *g)
{
	/* bug 00116, 13 June 1998 */
	if (blocknr)
	{
		/* check if cache has space left */
		if (alloc_data.rtbf_index < alloc_data.rtbf_size)
		{
			alloc_data.reservedtobefreed[alloc_data.rtbf_index++] = blocknr;
		}
		else
		{
			/* reallocate cache */
			ULONG newsize = alloc_data.rtbf_size ? alloc_data.rtbf_size * 2 : RTBF_CACHE_SIZE;
			ULONG *newbuffer = AllocMem(sizeof(*newbuffer) * newsize, MEMF_ANY);
			if (newbuffer)
			{
				if (alloc_data.reservedtobefreed)
				{
					CopyMem(alloc_data.reservedtobefreed, newbuffer, sizeof(*newbuffer) * alloc_data.rtbf_index);
					FreeMem(alloc_data.reservedtobefreed, sizeof(*newbuffer) * alloc_data.rtbf_size);
				}
				alloc_data.reservedtobefreed = newbuffer;
				alloc_data.rtbf_size = newsize;
				alloc_data.reservedtobefreed[alloc_data.rtbf_index++] = blocknr;
				return;
			}

			/* this should never happen */
			DB(Trace(10,"ResToBeFreed","reserved to be freed cache full\n"));
			ErrorMsg (AFS_BETA_WARNING_1, NULL, g);

			/* hope nobody allocates this block before the disk has been
			** updated
			*/
			FreeReservedBlock (blocknr, g);
		}
	}
}
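/* Growth note: the reserved-to-be-freed cache starts at RTBF_CACHE_SIZE
 * entries and doubles on every overflow, so repeated ResToBeFreed calls cost
 * amortised constant time. Only if AllocMem fails is the block number handed
 * back to the allocator immediately via FreeReservedBlock.
 */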
/* Makes a cached block ready for reuse:
** - Remove from queue
** - (dirblock) Decouple all references to the block
**
** NOTE: NOT REMOVED FROM LRU!
*/
void FlushBlock (struct cachedblock *block, globaldata *g)
{
	lockentry_t *le;

	DB(Trace(10,"FlushBlock","Flushing block %lx\n", block->blocknr));

	/* remove block from blockqueue */
	MinRemove(block);

	/* decouple references */
	if (IsDirBlock(block))
	{
		/* check fileinfo references */
		for (le = (lockentry_t *)HeadOf(&block->volume->fileentries); le->le.next; le = (lockentry_t *)le->le.next)
		{
			/* only dirs and files have fileinfos that need to be updated,
			** but the volume * pointer of volumeinfos never points to
			** a cached block, so the type != ETF_VOLUME check is not
			** necessary. Just check the dirblockpointer
			*/
			if (le->le.info.file.dirblock == (struct cdirblock *)block)
			{
				le->le.dirblocknr = block->blocknr;
				le->le.dirblockoffset = (UBYTE *)le->le.info.file.direntry - (UBYTE *)block;
#if DELDIR
				le->le.info.deldir.special = SPECIAL_FLUSHED;	/* flushed reference */
#else
				le->le.info.direntry = NULL;
#endif
				le->le.info.file.dirblock = NULL;
			}

			/* exnext references */
			if (le->le.type.flags.dir && le->nextentry.dirblock == (struct cdirblock *)block)
			{
				le->nextdirblocknr = block->blocknr;
				le->nextdirblockoffset = (UBYTE *)le->nextentry.direntry - (UBYTE *)block;
#if DELDIR
				le->nextentry.direntry = (struct direntry *)SPECIAL_FLUSHED;
#else
				le->nextentry.direntry = NULL;
#endif
				le->nextentry.dirblock = NULL;
			}
		}
	}

	memset(block, 0, SIZEOF_CACHEDBLOCK);
}
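/* Flush/restore pairing: FlushBlock only remembers a flushed reference as a
 * (dirblocknr, dirblockoffset) pair and clears the pointers; when the block
 * is later reloaded, UpdateReference below turns that pair back into real
 * dirblock/direntry pointers.
 */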
/* Updates references of listentries to dirblock
*/
void UpdateReference (ULONG blocknr, struct cdirblock *blk, globaldata *g)
{
	lockentry_t *le;

	DB(Trace(1,"UpdateReference","block %lx\n", blocknr));

	for (le = (lockentry_t *)HeadOf(&blk->volume->fileentries); le->le.next; le = (lockentry_t *)le->le.next)
	{
		/* ignoring the fact that not all objectinfos are fileinfos, but the
		** 'volumeinfo.volume' and 'deldirinfo.deldir' fields never are NULL anyway, so ...
		** maybe better to check for SPECIAL_FLUSHED
		*/
		if (le->le.info.file.dirblock == NULL && le->le.dirblocknr == blocknr)
		{
			le->le.info.file.dirblock = blk;
			le->le.info.file.direntry = (struct direntry *)((UBYTE *)blk + le->le.dirblockoffset);
			le->le.dirblocknr = 0;
			le->le.dirblockoffset = 0;
		}

		/* exnext references */
		if (le->le.type.flags.dir && le->nextdirblocknr == blocknr)
		{
			le->nextentry.dirblock = blk;
			le->nextentry.direntry = (struct direntry *)((UBYTE *)blk + le->nextdirblockoffset);
			le->nextdirblocknr = 0;
			le->nextdirblockoffset = 0;
		}
	}
}
/* Updates objectinfo of a listentry (if necessary)
 * This function only reloads the flushed directory block referred to. The
 * load directory block routine will actually restore the reference.
 */
void UpdateLE (listentry_t *le, globaldata *g)
{
	//DB(Trace(1,"UpdateLE","Listentry %lx\n", le));

	/* don't update volumeentries or deldirs!! */
#if DELDIR
	if (!le || le->info.deldir.special <= SPECIAL_DELFILE)
#else
	if (!le || IsVolumeEntry(le))
#endif
		return;

	/* reload the dirblock if the reference was flushed; LoadDirBlock
	** will restore the reference (see UpdateReference)
	*/
	if (!le->info.file.dirblock)
		LoadDirBlock (le->dirblocknr, g);

	MakeLRU (le->info.file.dirblock);
	LOCK(le->info.file.dirblock);
}
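/* Both UpdateLE and UpdateLE_exa (below) follow the same pattern: if the
 * reference was flushed, reload the directory block, then MakeLRU and LOCK
 * the cached block so AllocLRU skips it (it only recycles unlocked blocks)
 * while the entry is in use.
 */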
void UpdateLE_exa (lockentry_t *le, globaldata *g)
{
	//DB(Trace(1,"UpdateLE_exa","LE %lx\n", le));

	/* only directories carry an exnext reference */
	if (le->le.type.flags.dir)
	{
		if (IsDelDir(le->le.info))
			return;

		/* reload the dirblock the exnext reference was flushed from */
		if (le->nextdirblocknr)
			LoadDirBlock (le->nextdirblocknr, g);

		if (le->nextentry.dirblock)
		{
			MakeLRU (le->nextentry.dirblock);
			LOCK(le->nextentry.dirblock);
		}
	}
}
/* Check whether a block is already in the cache, using the hash table.
 * The 'mask' is used as a fast modulo operator for the hash table size.
 */
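/* Worked example (added note, not in the original source): with a hash table
 * of 64 chains the caller passes mask = 63, so block 1234 lands in chain
 * (1234/2) & 63 = 617 & 63 = 41.  The table size is assumed to be a power of
 * two, which is what makes the mask a valid substitute for a modulo.
 */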
struct cachedblock *CheckCache (struct MinList *list, UWORD mask, ULONG blocknr, globaldata *g)
{
	struct cachedblock *block;

	for (block = HeadOf(&list[(blocknr/2)&mask]); block->next; block = block->next)
	{
		if (block->blocknr == blocknr)