1 #include "asmsupport.h"
3 #include <exec/memory.h>
4 #include <exec/types.h>
5 #include <proto/exec.h>
7 #include "cachebuffers.h"
8 #include "cachebuffers_protos.h"
10 #include "bitmap_protos.h"
13 #include "cachedio_protos.h"
14 #include "req_protos.h"
15 #include "support_protos.h"
16 #include "transactions_protos.h"
17 #include "transactions.h"
20 extern void outputcachebuffer(struct CacheBuffer
*cb
);
21 extern void dumpcachebuffers(void);
23 extern void setchecksum(struct CacheBuffer
*);
25 /* Internal globals */
/* Initialises the cachebuffer administration: the LRU list and the
   hash-chain headers used to find cached blocks quickly. */
LONG initcachebuffers(void) {
  initlist((struct List *)&globals->cblrulist);

  /* NOTE(review): the enclosing for(n=0;...) loop over the hash table and
     this function's return statement are missing from this copy of the
     source ('n' is not declared here) -- confirm against the original. */
  initlist((struct List *)&globals->cbhashlist[n]);
/* Sanity-checks a CacheBuffer; raises a requester and debug output when it
   looks corrupt.  'string' names the calling function (diagnostics only). */
static void checkcb(struct CacheBuffer *cb,UBYTE *string) {
  // if(cb->id!=0x4A48 || cb->data!=&cb->attached_data[0] || (cb->bits & (CB_ORIGINAL|CB_EMPTY))==(CB_ORIGINAL|CB_EMPTY) || (cb->bits & (CB_ORIGINAL|CB_LATEST))==(CB_ORIGINAL|CB_LATEST) || (cb->bits & (CB_ORIGINAL|CB_LATEST|CB_EMPTY))==CB_EMPTY) {

  /* A valid CacheBuffer has magic id 0x4A48, its data pointer aimed at its
     own attached_data area, and a consistent CB_ORIGINAL/CB_LATEST/CB_EMPTY
     bit combination.  NOTE(review): the disabled check above also rejects
     (CB_ORIGINAL|CB_LATEST)==(CB_ORIGINAL|CB_LATEST); the active condition
     below may have lost clauses in this copy -- confirm. */
  if(cb->id!=0x4A48 || cb->data!=&cb->attached_data[0] || (cb->bits & (CB_ORIGINAL|CB_EMPTY))==(CB_ORIGINAL|CB_EMPTY) || (cb->bits & (CB_ORIGINAL|CB_LATEST|CB_EMPTY))==CB_EMPTY) {

    /* Aargh, this doesn't seem to be a REAL cachebuffer... */

    req_unusual("Function '%s' detected an invalid CacheBuffer!", string);

    _DEBUG(("checkcb: *** Not a valid cachebuffer!! ***\nDetected by function '%s'\n",string));
    outputcachebuffer(cb);
    /* NOTE(review): the closing braces (and possibly a dumpcachebuffers()
       call) are missing from this copy of the source. */
/* Walks the hash chain of block 'blckno' looking for its CB_ORIGINAL
   CacheBuffer (the version as stored on disk). */
struct CacheBuffer *findoriginalcachebuffer(BLCK blckno) {
  struct CacheBuffer *cb;

  /* Hash chains link CacheBuffers through their 'hashnode'; the '-1'
     steps back one MinNode to convert a hashnode pointer into the
     containing CacheBuffer (assumes hashnode is preceded by exactly one
     MinNode in struct CacheBuffer -- TODO confirm in cachebuffers.h). */
  cb=(struct CacheBuffer *)(globals->cbhashlist[blckno & (HASHSIZE-1)].mlh_Head-1);

  while(cb->hashnode.mln_Succ!=0) {
    if(cb->blckno==blckno && (cb->bits & CB_ORIGINAL)!=0) {
      /* NOTE(review): the body of this if (presumably 'return(cb);'), the
         closing braces and the not-found return are missing from this copy. */

    cb=(struct CacheBuffer *)(cb->hashnode.mln_Succ-1);
/* Walks the hash chain of block 'blckno' looking for its CB_LATEST
   CacheBuffer (the most recent version of the block). */
struct CacheBuffer *findlatestcachebuffer(BLCK blckno) {
  struct CacheBuffer *cb;

  /* '-1' converts a hashnode pointer back to the containing CacheBuffer;
     see findoriginalcachebuffer() for the layout assumption. */
  cb=(struct CacheBuffer *)(globals->cbhashlist[blckno & (HASHSIZE-1)].mlh_Head-1);

  while(cb->hashnode.mln_Succ!=0) {
    if(cb->blckno==blckno && (cb->bits & CB_LATEST)!=0) {
      /* NOTE(review): the body of this if (presumably 'return(cb);'), the
         closing braces and the not-found return are missing from this copy. */

    cb=(struct CacheBuffer *)(cb->hashnode.mln_Succ-1);
static __inline void mrucachebuffer(struct CacheBuffer *cb) {

  /* Moves the passed in CacheBuffer to the end of the LRU list.
     This means it becomes the MRU CacheBuffer. */

  /* NOTE(review): a removem(&cb->node) that detaches the buffer from its
     current LRU position appears to be missing from this copy, as is the
     closing brace. */
  addtailm(&globals->cblrulist,&cb->node);
LONG readcachebuffer(struct CacheBuffer **returned_cb, BLCK block) {

  /* Obtains the specified cachebuffer by any means necessary.  This
     function will always obtain the latest version of the block in
     question.  It first looks for a CacheBuffer currently being
     modified and returns that if found.  Otherwise it reads the
     original cachebuffer and applies the most recent changes to it.
     Returns 0 on success or an error code (via applyoperation). */

  _XDEBUG((DEBUG_CACHEBUFFER," readcb: block %ld\n",block));

  globals->statistics.cache_accesses++;

  if((*returned_cb=findlatestcachebuffer(block))==0) {
    /* Not cached in its latest form: rebuild it from the original plus
       any pending transaction operations. */
    return(applyoperation(block, returned_cb));

  /* NOTE(review): closing braces are missing in this copy from here on. */
  checkcb(*returned_cb,"readcachebuffer");

  /* A cache hit makes this buffer the most-recently-used one. */
  mrucachebuffer(*returned_cb);

  if(((*returned_cb)->bits & CB_LATEST)==0) {
    dreq("readcachebuffer didn't return the latest cachebuffer!\nPlease notify the author!");
    outputcachebuffer(*returned_cb);
    /* NOTE(review): the remainder of the function (presumably 'return(0);'
       and closing braces) is missing from this copy. */
LONG readoriginalcachebuffer(struct CacheBuffer **returned_cb,BLCK blckno) {
  struct CacheBuffer *cb;

  /* Reads a cachebuffer from disk (if needed).  The cachebuffer will not
     be locked.  Note that this function returns the original cachebuffer
     (as currently stored on disk)! */

  if((cb=findoriginalcachebuffer(blckno))!=0) {
    /* We managed to find the original! */

    _XDEBUG((DEBUG_CACHEBUFFER," readorgcb: block %ld (from cache)\n",blckno));

  /* NOTE(review): braces and statements are missing between the branches
     below in this copy of the source -- structure is approximate. */
  else if((cb=getcachebuffer())!=0) {

    _XDEBUG((DEBUG_CACHEBUFFER," readorgcb: block %ld (from disk)\n",blckno));

    /* Sanity check: a latest version must not exist while the original is
       absent from the cache. */
    if(findlatestcachebuffer(blckno)!=0) {
      dreq("readoriginalcachebuffer: Fatal error!\nPlease notify the author!");

    /* We found an empty cachebuffer in which we can read the original now. */

    globals->statistics.cache_misses++;

    /* NOTE(review): 'errorcode' is not declared in the visible text. */
    if((errorcode=read(blckno,cb->data,1))!=0) {

    cb->bits=CB_ORIGINAL;

    /* Only hash the buffer if no pending operation supersedes it. */
    if(isthereanoperationfor(blckno)==FALSE) {

      addtailm(&globals->cbhashlist[blckno & (HASHSIZE-1)],&cb->hashnode);

  /* No free cachebuffer could be obtained. */
  return(ERROR_NO_FREE_STORE);

  /* We either found the original, or just read it successfully. */
/* Looks up the CB_ORIGINAL CacheBuffer of block 'blckno' and, if it is
   cached, empties it for reuse.  No effect when the block isn't cached. */
void emptyoriginalcachebuffer(BLCK blckno) {
  struct CacheBuffer *cb;

  if((cb=findoriginalcachebuffer(blckno))!=0) {
    /* We managed to find the original! */
    emptycachebuffer(cb);
    /* NOTE(review): closing braces missing in this copy. */
void resetcachebuffer(struct CacheBuffer *cb) {
  /* Resets the CacheBuffer to its default state.  All fields are reset
     to their defaults, the CacheBuffer will be properly delinked. */

  checkcb(cb,"resetcachebuffer");

  /* NOTE(review): the 'if(cb->locked...)' guard around this requester is
     missing from this copy of the source. */
  dreq("resetcachebuffer: CacheBuffer is still locked!\nPlease notify the author!");
  outputcachebuffer(cb);

  /* Only delink from the hash chain if the buffer is actually hashed. */
  if(cb->hashnode.mln_Succ!=0 && cb->hashnode.mln_Pred!=0) {
    removem(&cb->hashnode);

  /* Mark the buffer as not hashed. */
  cb->hashnode.mln_Succ=0;
  cb->hashnode.mln_Pred=0;
  /* NOTE(review): resets of the remaining fields and the closing brace are
     missing from this copy. */
void emptycachebuffer(struct CacheBuffer *cb) {
  /* Empties the CacheBuffer so it can be used for new data.  All fields are
     reset to their defaults, the CacheBuffer will be properly delinked. */

  resetcachebuffer(cb);

  /* Add empty buffer to head of LRU chain.  This will not only increase
     performance when looking for a free buffer, but it will also ensure
     that EMPTY buffers are reused first, while otherwise a potentially
     useful buffer could be reused. */

  // addheadm(&cblrulist,&cb->node);
  AddHead((struct List *)&globals->cblrulist,(struct Node *)&cb->node);
  /* NOTE(review): closing brace missing in this copy. */
void lockcachebuffer(struct CacheBuffer *cb) {
  /* Make absolutely sure the cachebuffer in question is unlocked
     again (as many times as it was locked!), or face the consequences:
     a locked buffer is never reused by getcachebuffer(). */

  checkcb(cb,"lockcachebuffer");

  /* Buffers that are CB_ORIGINAL but not CB_LATEST must remain reusable,
     so locking them is an error. */
  if((cb->bits & (CB_ORIGINAL|CB_LATEST))==CB_ORIGINAL) {
    dreq("Original non-latest cachebuffers may not be locked.\nPlease notify the author!");
    outputcachebuffer(cb);
    /* NOTE(review): the rest of this function (presumably cb->locked++ and
       closing braces) is missing from this copy of the source. */
/* Decrements the lock count of a CacheBuffer.  Safe to call with cb==0. */
void unlockcachebuffer(struct CacheBuffer *cb) {
  if(cb!=0 && cb->locked!=0) {

    checkcb(cb,"unlockcachebuffer");

    /* NOTE(review): lines are missing here in this copy (presumably the
       cb->locked-- and an else-branch); the requester below appears to
       belong to an 'else' path that fires when unlocking an unlocked
       buffer. */
    dreq("unlockcachebuffer: cb->locked was zero!");
    outputcachebuffer(cb);
/* Sets up a fresh CB_LATEST|CB_EMPTY CacheBuffer for block 'block' and
   links it into the hash table. */
struct CacheBuffer *createnewcachebuffer(BLCK block) {
  struct CacheBuffer *cb;

  /* Sanity check: there must not already be a latest version cached. */
  if(findlatestcachebuffer(block)!=0) {
    dreq("createnewcachebuffer: Fatal error!\nPlease notify the author!");

  /* NOTE(review): the acquisition of 'cb' (presumably via getcachebuffer())
     and the blckno assignment are missing from this copy. */
  cb->bits=CB_LATEST|CB_EMPTY;

  addtailm(&globals->cbhashlist[block & (HASHSIZE-1)],&cb->hashnode);
  /* NOTE(review): the return statement and closing brace are missing. */
struct CacheBuffer *newcachebuffer(BLCK block) {
  struct CacheBuffer *cb;

  /* Looks for an unused cachebuffer and clears it.  This cachebuffer
     doesn't have an original (it was marked free, and thus contains
     junk) so it will be treated as if it was zero-filled. */

  /* Possible cache states for this block (comment delimiters lost in this
     copy; re-wrapped here):
     CB_ORIGINAL           -> Indicates there IS a later version (even if it isn't currently in cache).
     CB_ORIGINAL|CB_LATEST -> Indicates there were no modifications to this block ever.
     CB_LATEST             -> Impossible, there must be a CB_ORIGINAL as well then.
     (nothing found)       -> Create from empty. */

  /*** We probably should prevent CB_EMPTY style cachebuffers and normal
       style to be together in the cache at the same time.  Blocks which
       have been marked free should never be written out to disk anyway... */

  if((cb=findlatestcachebuffer(block))!=0) {
    preparecachebuffer(cb);
    clearcachebuffer(cb);

  /* NOTE(review): an else branch and braces are missing in this copy. */
  cb=createnewcachebuffer(block);

  clearcachebuffer(cb);
  /* NOTE(review): locking, the return statement and closing braces are
     missing from this copy. */
void preparecachebuffer(struct CacheBuffer *cb) {
  /* Prepares a cachebuffer to be changed.  A copy of the original is kept
     for later comparison, but the copy is available for reuse.  If the
     cachebuffer is already a newer version then no copy of the original
     is needed.

     Note: Because this function doesn't require both the original and
           the modified version to be locked in memory, there will ALWAYS
           be enough room to successfully execute this function.  In the
           worst case it will simply immediately use the current version! */

  /* Preparing a buffer only makes sense inside a transaction. */
  if(globals->transactionnestcount==0) {
    dreq("No transaction was started when preparecachebuffer() was called!\nPlease notify the author!");
    outputcachebuffer(cb);

  if((cb->bits & CB_LATEST)==0) {
    dreq("preparecachebuffer(): Only latest cachebuffers may be prepared!\nPlease notify the author!");
    outputcachebuffer(cb);

  checkcb(cb,"preparecachebuffer");

  /* State handling (comment delimiters lost in this copy; re-wrapped):
     CB_ORIGINAL           -> Aren't allowed to be locked (and thus prepared).
     CB_ORIGINAL|CB_LATEST -> Make copy, and copy becomes CB_ORIGINAL. cb becomes CB_LATEST.
     CB_LATEST             -> Do nothing. */

  if((cb->bits & CB_ORIGINAL)!=0) {
    saveoriginalcachebuffer(cb);
    /* NOTE(review): locking of cb, the closing braces and possibly further
       statements are missing from this copy of the source. */
struct CacheBuffer *saveoriginalcachebuffer(struct CacheBuffer *cb) {
  struct CacheBuffer *cb_new;

  /* Makes a copy of the original CacheBuffer into another free CacheBuffer.
     The new location of the original CacheBuffer is returned.  The old
     location is converted into a CB_LATEST CacheBuffer. */

  if((cb->bits & CB_ORIGINAL)==0) {
    dreq("saveoriginalcachebuffer: Only original cachebuffers may be saved.\nPlease notify the author!");

  // lockcachebuffer(cb);     /* Lock original */

  /* NOTE(review): lines are missing around here in this copy (braces,
     possibly further statements). */
  cb_new=getcachebuffer();

  unlockcachebuffer(cb);

  /* The copy becomes the on-disk original... */
  cb_new->blckno=cb->blckno;
  cb_new->bits=CB_ORIGINAL;

  /* ...while the old buffer stops being the original (its checksum flag is
     dropped too). */
  cb->bits&=~(CB_ORIGINAL|CB_CHECKSUM);

  CopyMemQuick(cb->data, cb_new->data, globals->bytes_block);

  /* Hash the copy under its block number. */
  addtailm(&globals->cbhashlist[cb_new->blckno & (HASHSIZE-1)],&cb_new->hashnode);
  /* NOTE(review): the return statement and closing brace are missing from
     this copy. */
static LONG compresscachebuffer(struct CacheBuffer *cb_org,struct CacheBuffer *cb_new) {

  /* cb_org can be 0, in which case we mean a CacheBuffer filled with zeroes. */

  /* This function creates a new transaction using the 2 cachebuffers passed in. */

  /* NOTE(review): the declarations of 'length'/'bits' and the if/else that
     selects between the two compress calls are missing from this copy;
     structure below is approximate. */
  length=compress(cb_org->data,cb_new->data,globals->compressbuffer);

  /* No original: compress against an implicit all-zero block. */
  length=compressfromzero(cb_new->data,globals->compressbuffer);

  /* NOTE(review): the body that sets 'bits' for CB_EMPTY buffers is
     missing from this copy. */
  if((cb_new->bits & CB_EMPTY)!=0) {

  return(addoperation(cb_new->blckno,globals->compressbuffer,length,bits));
LONG storecachebuffer_nochecksum(struct CacheBuffer *cb) {
  struct CacheBuffer *cb_org=0;

  /* Storing is only valid inside a transaction. */
  if(globals->transactionnestcount==0) {
    dreq("No transaction was started when storecachebuffer() was called!\nPlease notify the author!");
    outputcachebuffer(cb);

  checkcb(cb,"storecachebuffer");

  /* This function guarantees that the passed in cachebuffer is still valid
     after calling this function.  This is because there still is a lock
     imposed by preparecachebuffer() which is only removed at the end of
     this function. */

  /* NOTE(review): 'errorcode' and 'blocksfree' are not declared in the
     visible text; braces/else-paths are missing below. */
  if((errorcode=getfreeblocks(&blocksfree))==0) {

    /* Only store when the transaction still fits in the free space. */
    if(blocksfree >= transactionspace()) {
      /* Any changes made to this cachebuffer are stored in the transaction. */

      /* A non-empty buffer needs its on-disk original for the diff. */
      if((cb->bits & CB_EMPTY)==0) {
        errorcode=readoriginalcachebuffer(&cb_org,cb->blckno);

      /* NOTE(review): the branching between these two operations is
         missing from this copy. */
      errorcode=addoperation2(cb_org,cb);

      errorcode=compresscachebuffer(cb_org,cb);

    /* Transaction no longer fits: report a full disk. */
    errorcode=ERROR_DISK_FULL;

  /* Drop the lock taken by preparecachebuffer(). */
  unlockcachebuffer(cb);
  /* NOTE(review): the return statement and closing braces are missing. */
LONG storecachebuffer(struct CacheBuffer *cb) {

  /* This function guarantees that the passed in cachebuffer is still valid
     after calling this function.  This is because there still is a lock
     imposed by preparecachebuffer() which is only removed at the end of
     this function.
     NOTE(review): a setchecksum(cb) call is presumably missing here in
     this copy (this variant differs from _nochecksum by it) -- confirm. */

  return(storecachebuffer_nochecksum(cb));
508 #ifdef BLOCKCOMPRESSION
LONG changecachebuffer(struct CacheBuffer *cb, UBYTE *modifiedblocks) {
  struct CacheBuffer *cb_org=0;

  /* NOTE(review): bare 'transactionnestcount' here, while the rest of the
     file uses globals->transactionnestcount -- this #ifdef BLOCKCOMPRESSION
     code may be stale; confirm it still compiles when enabled. */
  if(transactionnestcount==0) {
    dreq("No transaction was started when changecachebuffer() was called!\nPlease notify the author!");
    outputcachebuffer(cb);

  checkcb(cb,"changecachebuffer");

  /* This function guarantees that the passed in cachebuffer is still valid
     after calling this function.  This is because there still is a lock
     imposed by preparecachebuffer() which is only removed at the end of
     this function. */

  /* No earlier operation for this block: fall back to a full store. */
  if((o=getlatestoperation(cb->blckno))==0) {
    _DEBUG(("changecachebuffer: Using storecachebuffer()\n"));
    return(storecachebuffer(cb));

  END("getlatestoperation()");

  // _DEBUG(("changecachebuffer: Using mergediffs()\n"));

  /* NOTE(review): 'o', 'errorcode', 'blocksfree', 'length' and 'bits' are
     not declared in the visible text; braces are missing below. */
  if((errorcode=getfreeblocks(&blocksfree))==0) {

    if(blocksfree >= transactionspace()) {
      /* Any changes made to this cachebuffer are stored in the transaction. */

      if((cb->bits & CB_EMPTY)==0) {
        errorcode=readoriginalcachebuffer(&cb_org,cb->blckno);

      END("getfreeblocks");

      /* Merge the stored diff with the new changes into compressbuffer. */
      length=mergediffs(&o->oi.data[0], compressbuffer, o->oi.length, cb->data, cb_org->data, modifiedblocks);

      /* NOTE(review): the body that sets 'bits' for CB_EMPTY buffers is
         missing from this copy. */
      if((cb->bits & CB_EMPTY)!=0) {

      errorcode=addoperation(cb->blckno, compressbuffer, length, bits);

    /* Transaction no longer fits: report a full disk. */
    errorcode=ERROR_DISK_FULL;

  /* Drop the lock taken by preparecachebuffer(). */
  unlockcachebuffer(cb);
  /* NOTE(review): the return statement and closing braces are missing. */
void dumpcachebuffer(struct CacheBuffer *cb) {
  /* Any changes made to this cachebuffer will not be stored, and this
     cachebuffer will be emptied. */

  unlockcachebuffer(cb);
  restorecachebuffer(cb);
  /* NOTE(review): closing brace missing in this copy. */
/* Debug helper: runs the checkcb() sanity check on every CacheBuffer in
   the LRU list. */
static void dumpcachebuffers3(void) {
  struct CacheBuffer *cb;

  cb=(struct CacheBuffer *)globals->cblrulist.mlh_Head;

  while(cb->node.mln_Succ!=0) {
    checkcb(cb,"dump/getcachebuffer");
    cb=(struct CacheBuffer *)(cb->node.mln_Succ);
    /* NOTE(review): closing braces missing in this copy. */
/* Walks the LRU list and resets every reusable CacheBuffer (original,
   non-latest buffers that have a later cached version). */
void killunlockedcachebuffers(void) {
  struct CacheBuffer *cb;

  cb=(struct CacheBuffer *)globals->cblrulist.mlh_Head;

  while(cb->node.mln_Succ!=0) {

    /* NOTE(review): the start of this condition (presumably
       'if(cb->locked==0 && ...') is missing from this copy; only the tail
       of the expression survives. */
    ((((cb->bits & (CB_ORIGINAL|CB_LATEST)))==CB_ORIGINAL) &&
     (findlatestcachebuffer(cb->blckno)!=0))

      clearcachebuffer(cb);
      resetcachebuffer(cb);

    cb=(struct CacheBuffer *)(cb->node.mln_Succ);
    /* NOTE(review): closing braces missing in this copy. */
struct CacheBuffer *getcachebuffer() {
  struct CacheBuffer *cb;
  LONG buffers=globals->totalbuffers;

  /* It's absolutely essential that getcachebuffer always uses the
     LEAST recently used cachebuffer which isn't currently used in
     an operation.  The reason for this is not only because this is
     a good algorithm to ensure the cache contains blocks which are
     the most often used, but also because a lot of functions -rely-
     on the fact that a buffer which has been used (read!) recently
     remains in the cache for a while longer(!).

     Because this process of 'relying' on a recently read buffer to
     still be in cache is a bit tricky business, we recently added
     locking functions.  These functions (lockcachebuffer() &
     unlockcachebuffer()) can prevent this function from returning
     the cachebuffer in question for re-use.  Only this function is
     affected by this locking process.  Always make sure the cache
     buffer is unlocked again! */

  /* a CacheBuffer can be reused if the following criteria are met:

     - The CacheBuffer is not locked, and

     - If the CacheBuffer is CB_ORIGINAL, but not CB_LATEST
       then it must not have a corresponding CB_LATEST
       CacheBuffer.  In other words, originals don't get
       reused if there is a later version of the same block. */

  // killunlockedcachebuffers();

  // cb=(struct CacheBuffer *)globals->cblrulist.mlh_Head;
  // mrucachebuffer(cb);

  /* weissms: changed to trick gcc-4.4.4 optimizer */
  /* NOTE(review): the opening 'do {' of this loop is missing from this
     copy of the source. */
  cb=(struct CacheBuffer *)RemHead((struct List *)&globals->cblrulist);
  addtailm(&globals->cblrulist,&cb->node);

  /* Keep rotating the LRU list until a reusable buffer surfaces or every
     buffer has been inspected ('buffers' is the loop bound). */
  } while((cb->locked>0 || ((cb->bits & (CB_ORIGINAL|CB_LATEST))==CB_ORIGINAL && findlatestcachebuffer(cb->blckno)!=0)) && buffers-->0);

  /* NOTE(review): the guard around this failure path (presumably
     'if(buffers<=0)') is missing from this copy. */
  _XDEBUG((DEBUG_CACHEBUFFER,"getcachebuffer: No more cachebuffers available!\n"));

  req_unusual("SFS has ran out of cache buffers.");

  resetcachebuffer(cb);  /* emptycachebuffer also adds cachebuffer at top of LRU list... we don't want that. */
  /* NOTE(review): the return statement and closing brace are missing. */
/* Fills the CacheBuffer's data area with zeroes. */
void clearcachebuffer(struct CacheBuffer *cb) {
  /* bytes_block>>4 iterations: each loop pass apparently clears 16 bytes
     (4 ULONGs) -- the loop body is missing from this copy, confirm. */
  ULONG blocksize=globals->bytes_block>>4;
  ULONG *block=cb->data;

  checkcb(cb,"clearcachebuffer");

  while(blocksize--!=0) {
/* Writes the CacheBuffer's single block back to disk at cb->blckno.
   Returns the result of write(). */
LONG writecachebuffer(struct CacheBuffer *cb) {

  checkcb(cb,"writecachebuffer");

  return(write(cb->blckno,cb->data,1));
  /* NOTE(review): closing brace missing in this copy. */
/* Debug helper: dumps one table row per CacheBuffer on the LRU list. */
void dumpcachebuffers(void) {
  struct CacheBuffer *cb;

  cb=(struct CacheBuffer *)globals->cblrulist.mlh_Head;

  _DEBUG(("Blck-- Lock Bits Data---- ID------ cb-adr-- Hashed?\n"));
  while(cb->node.mln_Succ!=0) {
    _DEBUG(("%6ld %4ld %4ld %08lx %08lx %08lx ",cb->blckno,(LONG)cb->locked,(LONG)cb->bits,cb->data,*(ULONG *)cb->data,cb));

    /* A buffer with both hashnode links zero is not on any hash chain. */
    if(cb->hashnode.mln_Succ==0 && cb->hashnode.mln_Pred==0) {
      /* NOTE(review): the "Hashed?" column output and an else-branch are
         missing from this copy. */

    cb=(struct CacheBuffer *)(cb->node.mln_Succ);
    /* NOTE(review): closing braces missing in this copy. */
/* Debug helper: counts CacheBuffers on the LRU list and complains when the
   count no longer matches globals->totalbuffers. */
static void dumpcachebuffers2(void) {
  struct CacheBuffer *cb;

  cb=(struct CacheBuffer *)globals->cblrulist.mlh_Head;

  while(cb->node.mln_Succ!=0) {
    /* NOTE(review): the declaration and increment of 'cnt' are missing
       from this copy of the source. */
    cb=(struct CacheBuffer *)(cb->node.mln_Succ);

  if(cnt!=globals->totalbuffers) {
    _DEBUG(("------------ cachebuffers have been killed!! ---------------\n"));
    /* NOTE(review): closing braces missing in this copy. */
/* Grows or shrinks the cache by 'buffers' CacheBuffers (negative shrinks),
   never dropping below MINCACHESIZE.  Flushes the transaction and
   invalidates the cache first, since buffers are freed/reused wholesale. */
LONG addcachebuffers(LONG buffers) {
  struct CacheBuffer *cb;

  /* NOTE(review): 'newbuffers', 'errorcode' and 'counter' are not declared
     in the visible text; braces and statements are missing throughout. */
  newbuffers=globals->totalbuffers+buffers;

  /* Enforce the lower bound on the cache size. */
  if(newbuffers<MINCACHESIZE) {
    newbuffers=MINCACHESIZE;

  buffers=newbuffers-globals->totalbuffers;
  /* if buffers is positive than add 'buffers' buffers, else free some */

  if((errorcode=flushtransaction())!=0) {

  invalidatecachebuffers();

  _DEBUG(("Allocating buffers\n"));

  /* Each CacheBuffer is allocated together with its attached data area. */
  while(buffers!=0 && (cb=AllocMem(globals->bytes_block+sizeof(struct CacheBuffer),MEMF_CLEAR|globals->bufmemtype))!=0) {

    addtailm(&globals->cblrulist,&cb->node);

    cb->data=&cb->attached_data[0];

  _DEBUG(("Allocation failed!\n"));

  buffers=-counter;   /* This makes sure that the already allocated buffers are freed again */
  newbuffers=globals->totalbuffers;
  errorcode=ERROR_NO_FREE_STORE;

  /* Free 'buffers' CacheBuffers from the tail of the LRU list. */
  while(buffers++!=0) {
    cb=(struct CacheBuffer *)globals->cblrulist.mlh_TailPred;
    RemTail((struct List *)&globals->cblrulist);
    resetcachebuffer(cb);
    FreeMem(cb,sizeof(struct CacheBuffer)+globals->bytes_block);

  globals->totalbuffers=newbuffers;
  /* NOTE(review): the return statement and closing braces are missing. */
/* Resets every CacheBuffer on the LRU list, discarding all cached data. */
void invalidatecachebuffers() {
  struct CacheBuffer *cb;

  for(cb=(struct CacheBuffer *)globals->cblrulist.mlh_Head; cb!=(struct CacheBuffer *)&globals->cblrulist.mlh_Tail; cb=(struct CacheBuffer *)cb->node.mln_Succ) {
    resetcachebuffer(cb);
    /* NOTE(review): closing braces missing in this copy. */
836 CB_ORIGINAL - Indicates that the CacheBuffer is a direct
837 copy of the one stored on disk.
839 CB_LATEST - Indicates that this CacheBuffer is the most
840 recent version of the block. It also means that this
841 CacheBuffer can be locked.
843 If neither of these flags is set then the CacheBuffer is
846 For every CacheBuffer with CB_LATEST set, there must be a
847 CacheBuffer (for the same block) with CB_ORIGINAL set (this
848 could be the same CacheBuffer).
850 A CacheBuffer can be reused if the following criteria are
853 - The CacheBuffer is not locked, and
855 - If the CacheBuffer is CB_ORIGINAL, but not CB_LATEST
856 then it must not have a corresponding CB_LATEST
857 CacheBuffer. In other words, originals don't get
858 reused if there is a later version of the same block
864 Returns a cachebuffer with the latest version of a block
865 (CB_LATEST set). The original cachebuffer is read if not
870 Requires a cachebuffer and if needed an original version of
873 readoriginalcachebuffer()
875 Returns a cachebuffer with the original version of a block.