#include "asmsupport.h"

#include <dos/dos.h>          /* ERROR_#? codes (e.g. ERROR_NO_FREE_STORE) */
#include <dos/filehandler.h>  /* struct DosEnvec */
#include <exec/lists.h>
#include <exec/types.h>
#include <proto/exec.h>

#include "cachedio_protos.h"
#include "deviceio_protos.h"
#include "req_protos.h"
/* Inline MinList manipulation: unlink a node, append a node to the tail and
   insert a node at the head.  These mirror exec's Remove()/AddTail()/AddHead()
   but avoid the function-call overhead. */

#define removem(n) (n)->mln_Succ->mln_Pred=(n)->mln_Pred; (n)->mln_Pred->mln_Succ=(n)->mln_Succ
#define addtailm(l,n) (n)->mln_Succ=(l)->mlh_TailPred->mln_Succ; (l)->mlh_TailPred->mln_Succ=(n); (n)->mln_Pred=(l)->mlh_TailPred; (l)->mlh_TailPred=(n)
#define addheadm(l,n) (n)->mln_Succ=(l)->mlh_Head; (n)->mln_Pred=(struct MinNode *)(l); (l)->mlh_Head->mln_Pred=(n); (l)->mlh_Head=(n)
#ifndef IOC_HASHSIZE
#define IOC_HASHSIZE (32)  /* Size of the block-to-IOCache hash table; must be a power of two. */
#endif

static LONG copybackiocache(struct IOCache *ioc);
/* Internal structures */

struct IOCache {
  struct MinNode node;            /* LRU chain */

  struct IOCache *nexthash;
  struct IOCache *prevhash;

  UBYTE *data;                    /* Cached data; aligned to a 16-byte boundary. */

  ULONG block;                    /* First block cached by this line. */
  ULONG blocks;                   /* Number of cached blocks; an unused IOCache has blocks = 0. */

  ULONG dirty[4];                 /* Set bits indicate blocks which need to be written to disk. */
  ULONG valid[4];                 /* Set bits indicate blocks which contain up-to-date data. */

  UBYTE bits;                     /* See defines below */
  UBYTE locked;                   /* Indicates that lruiocache should not return this iocache */
};
/* Possible combinations for dirty and valid:

   dirty         : Illegal value.
   valid         : Block is the same as on disk.
   valid & dirty : Block is newer than the one on disk and must be flushed.
   (none)        : Block has not yet been read from disk. */
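/* Illustrative helper (not from the original source): how the table above
   could be decoded for a single block.  The real code tests whole ranges at
   once with the asmsupport.h bitmap routines (bmtsto/bmtstz/...), whose exact
   bit-numbering this sketch does not try to reproduce; plain LSB-first
   indexing is assumed here purely for the example. */

enum iocblockstate { IOC_BS_UNREAD, IOC_BS_CLEAN, IOC_BS_FLUSH, IOC_BS_ILLEGAL };

static enum iocblockstate iocblockstate(const ULONG *valid, const ULONG *dirty, ULONG n) {
  int v=(valid[n>>5]>>(n & 31)) & 1;  /* assumed bit layout, example only */
  int d=(dirty[n>>5]>>(n & 31)) & 1;

  if(v!=0 && d!=0) return(IOC_BS_FLUSH);    /* newer than disk; must be flushed */
  if(v!=0)         return(IOC_BS_CLEAN);    /* same as on disk */
  if(d!=0)         return(IOC_BS_ILLEGAL);  /* dirty but not valid cannot happen */
  return(IOC_BS_UNREAD);                    /* not yet read from disk */
}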
/* defines for IOCache bits */

#define IOC_DIRTY (1)  /* IOCache contains dirty data */
/* Functions making use of the IOCache mechanism:

   read  - reads one or more blocks into a buffer.
   write - writes one or more blocks from a buffer.

   The IOCache system is a simple caching mechanism intended to
   make maximum use of small reads.  The filesystem likes to read
   single blocks, which are usually only 512 bytes in size.  Most
   harddisks however can read a lot more data in a single access
   without a speed penalty.

   This is where the IOCaches come in.  Instead of reading just
   512 bytes, the system will read for example 4096 bytes.  It
   reads this information into IOCache buffers.  The data actually
   required is then copied from the IOCache into the destination
   buffer.

   This makes writes a little more complex, because data can
   reside in the cache: each write must be checked to see whether
   one of the IOCaches needs to be updated or flushed.  Whether an
   IOCache must be updated or flushed depends on the mode the
   cache operates in.  With write-through caching the buffers are
   simply updated and kept in memory; otherwise a buffer is simply
   marked invalid. */
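/* Illustrative helper (not part of the original source): the address
   arithmetic the rest of this file relies on.  Block n falls into the cache
   line starting at (n & ~iocache_mask); inside that line's data buffer it
   sits at byte offset (n - linestart) << shifts_block. */
static void exampleblockmapping(BLCK n, BLCK *linestart, ULONG *byteoffset) {
  *linestart=n & ~globals->iocache_mask;              /* first block of the cache line */
  *byteoffset=(n-*linestart)<<globals->shifts_block;  /* byte offset within ioc->data */
}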
void hashit(struct IOCache *ioc) {
  WORD hashentry=(ioc->block>>globals->iocache_shift) & (IOC_HASHSIZE-1);

  ioc->nexthash=globals->ioc_hashtable[hashentry];
  if(ioc->nexthash!=0) {
    ioc->nexthash->prevhash=ioc;
  }
  ioc->prevhash=0;
  globals->ioc_hashtable[hashentry]=ioc;
}
static void dehash(struct IOCache *ioc) {
  if(ioc->nexthash!=0) {
    ioc->nexthash->prevhash=ioc->prevhash;
  }
  if(ioc->prevhash!=0) {
    ioc->prevhash->nexthash=ioc->nexthash;
  }
  else {
    globals->ioc_hashtable[ (ioc->block>>globals->iocache_shift) & (IOC_HASHSIZE-1) ]=ioc->nexthash;  /* Aug 11 1998: changed '=0' to '=ioc->nexthash' !! */
  }
}
void invalidateiocache(struct IOCache *ioc) {
  if(ioc->blocks!=0) {
    dehash(ioc);
    ioc->blocks=0;
  }

  ioc->bits=0;
  ioc->dirty[0]=ioc->dirty[1]=ioc->dirty[2]=ioc->dirty[3]=0;
  ioc->valid[0]=ioc->valid[1]=ioc->valid[2]=ioc->valid[3]=0;
  // ioc->dirtylow=255;
}
void freeIOCache(struct MinList *lruhead) {

  /* Frees all IOCache buffers attached to the passed-in list, and the
     list header itself.  If lruhead is zero then this function does
     nothing. */

  if(lruhead!=0) {
    while(lruhead->mlh_TailPred != (struct MinNode *)lruhead) {
      struct IOCache *ioc=(struct IOCache *)lruhead->mlh_Head;

      removem(lruhead->mlh_Head);
      FreeVec(ioc);
    }

    FreeMem(lruhead, sizeof(struct MinList));
  }
}
struct MinList *allocate(ULONG size, LONG n) {
  struct MinList *lruhead;

  /* Allocates n IOCache buffers of /size/ bytes each and attaches
     them to the returned MinList.  The MinList is returned; it is
     zero if there wasn't enough memory. */

  if((lruhead=AllocMem(sizeof(struct MinList), globals->bufmemtype))!=0) {
    struct IOCache *ioc;

    lruhead->mlh_Head=(struct MinNode *)&lruhead->mlh_Tail;
    lruhead->mlh_Tail=0;
    lruhead->mlh_TailPred=(struct MinNode *)lruhead;

    size+=sizeof(struct IOCache)+16;

    while(--n>=0 && (ioc=AllocVec(size, globals->bufmemtype))!=0) {
      ioc->blocks=0;  // Mar 11 1999: Added this line to avoid that dehash() makes a mess of things.

      invalidateiocache(ioc);

      /* ioc->data is aligned to 16 byte boundaries. */
      ioc->data=(UBYTE *)((IPTR)((UBYTE *)ioc+sizeof(struct IOCache)+15) & (~0x0F));

      addtailm(lruhead, &ioc->node);
    }

    if(n>=0) {
      /* Not all buffers could be allocated; undo everything. */
      freeIOCache(lruhead);
      lruhead=0;
    }
  }

  return(lruhead);
}
ULONG queryiocache_lines(void) {
  return(globals->iocache_lines);
}
ULONG queryiocache_readaheadsize(void) {
  return(globals->iocache_sizeinblocks << globals->shifts_block);
}
BYTE queryiocache_copyback(void) {
  return(globals->iocache_copyback);
}
LONG setiocache(ULONG lines, ULONG readahead, BYTE copyback) {
  struct MinList *lruhead;
  ULONG sizeinblocks=readahead>>globals->shifts_block;
  WORD shift;

  /* This function changes the size and type of the IOCache.  The
     old cache settings will remain in effect if the new ones
     couldn't be applied due to lack of memory.

     When this function is called for the first time, there are no
     old settings to preserve. */

  /* Round the readahead size down to a power of two blocks. */
  if(sizeinblocks<4) {
    shift=2;  /* enforce the 4-block minimum the cache code assumes */
  }
  else if(sizeinblocks<8) {
    shift=2;
  }
  else if(sizeinblocks<16) {
    shift=3;
  }
  else if(sizeinblocks<32) {
    shift=4;
  }
  else if(sizeinblocks<64) {
    shift=5;
  }
  else if(sizeinblocks<128) {
    shift=6;
  }
  else {
    shift=7;
  }

  sizeinblocks=1<<shift;

  if(lines!=0 && lines<4) {
    lines=4;  /* cachedio currently needs at least 4 lines (see initcachedio) */
  }
  else if(lines>1024) {
    lines=1024;
  }

  if((lruhead=allocate(sizeinblocks<<globals->shifts_block, lines))!=0) {
    LONG errorcode;

    if(globals->iocache_lruhead==0 || (errorcode=flushiocache())==0) {
      WORD m;

      globals->iocache_sizeinblocks=sizeinblocks;
      globals->iocache_lines=lines;
      globals->iocache_copyback=copyback;
      if(copyback==FALSE) {
        globals->iocache_readonwrite=TRUE;
      }
      globals->iocache_mask=sizeinblocks-1;
      globals->iocache_shift=shift;

      freeIOCache(globals->iocache_lruhead);
      globals->iocache_lruhead=lruhead;

      if(globals->iocache_readonwrite==FALSE && globals->iocache_copyback!=FALSE) {
        /* Reserve one line as scratch buffer for validateiocache(). */
        globals->ioc_buffer=(struct IOCache *)globals->iocache_lruhead->mlh_Head;
        globals->ioc_buffer->locked=TRUE;
      }

      for(m=0; m<IOC_HASHSIZE; m++) {
        globals->ioc_hashtable[m]=0;
      }

      return(0);
    }

    freeIOCache(lruhead);
    return(errorcode);
  }

  return(ERROR_NO_FREE_STORE);
}
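/* Usage sketch (not from the original source; the parameter values are made
   up): reconfigure the cache at runtime, falling back to a disabled cache if
   memory is tight.  setiocache() keeps the old settings on failure. */
static LONG examplesetiocache(void) {
  LONG errorcode;

  if((errorcode=setiocache(32, 16384, TRUE))!=0) {  /* 32 lines, 16 KB readahead, copyback */
    errorcode=setiocache(0, 0, FALSE);              /* lines == 0 disables the IOCache */
  }

  return(errorcode);
}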
/* The IOCache is automatically disabled when iocache_lines == 0 */

LONG initcachedio(UBYTE *devicename, IPTR unit, ULONG flags, struct DosEnvec *de) {
  LONG errorcode;

  if((errorcode=initdeviceio(devicename, unit, flags, de))==0) {

    /* Note: There MUST be at least 4 IOCache lines for cachedio to work correctly at the moment!! */

    if((setiocache(8, 8192, TRUE))==0) {
      return(0);
    }

    errorcode=ERROR_NO_FREE_STORE;
    cleanupdeviceio();
  }

  return(errorcode);
}
void cleanupcachedio(void) {

  /* Only call this if initcachedio() was successful. */

  flushiocache();  /*** returns an errorcode... */
  freeIOCache(globals->iocache_lruhead);
  globals->iocache_lruhead=0;
  globals->iocache_lines=0;

  cleanupdeviceio();
}
struct IOCache *findiocache(BLCK block) {
  struct IOCache *ioc=globals->ioc_hashtable[ (block>>globals->iocache_shift) & (IOC_HASHSIZE-1) ];

  /* For internal use only.  This function will find the IOCache, if available.
     It won't move the block to the end of the LRU chain though -- use
     locateiocache for that. */

  while(ioc!=0) {
    if(block>=ioc->block && block<ioc->block+ioc->blocks) {
      return(ioc);
    }
    ioc=ioc->nexthash;
  }

  return(0);
}
struct IOCache *locateiocache(BLCK block) {
  struct IOCache *ioc;

  if((ioc=findiocache(block))!=0) {
    /* Move the cache line to the end of the LRU chain. */
    removem(&ioc->node);
    addtailm(globals->iocache_lruhead, &ioc->node);
  }

  return(ioc);
}
LONG lruiocache(struct IOCache **returned_ioc) {
  struct IOCache *ioc;
  LONG errorcode;

  /* Must be volatile to keep the ioc variable value up to date regardless of
     compiler optimizations */
  volatile struct MinList *ioclist = globals->iocache_lruhead;

  /* Returns the least recently used IOCache */

  do {
    ioc=(struct IOCache *) ioclist->mlh_Head;

    removem(&ioc->node);
    addtailm(globals->iocache_lruhead, &ioc->node);
  } while(ioc->locked!=0);

  if((errorcode=copybackiocache(ioc))!=0) {
    return(errorcode);
  }

  invalidateiocache(ioc);

  *returned_ioc=ioc;

  return(0);
}
void reuseiocache(struct IOCache *ioc) {

  /* This function makes sure that the passed-in IOCache is reused
     as quickly as possible.  This is a good idea if you used an IOCache
     and you're certain it won't be needed again.  In such a case you
     can call this function so this cache is the first to be reused. */

  removem(&ioc->node);
  addheadm(globals->iocache_lruhead, &ioc->node);
}
LONG validateiocache(struct IOCache *ioc, ULONG blockoffset, ULONG blocks) {
  LONG errorcode;

  /* This function will read the missing data of this IOCache from disk
     and merge it with any existing dirty blocks.  To reduce copying it
     will either use the newly read IOCache or the current IOCache to
     store the final result.  It will switch the data pointers so there is
     no need for rehashing. */

  if(globals->iocache_readonwrite!=FALSE || bmtsto(ioc->valid, blockoffset, blocks)!=0) {
    /* Nothing to do: read-on-write mode keeps lines complete, or the
       requested range is already valid. */
    return(0);
  }

  // _DEBUG(("validateiocache: ioc->block = %ld, ioc->blocks = %ld, ioc->dirty = 0x%08lx, ioc->valid = 0x%08lx\n", ioc->block, ioc->blocks, ioc->dirty[0], ioc->valid[0]));

  if((errorcode=transfer(DIO_READ, globals->ioc_buffer->data, ioc->block, ioc->blocks))==0) {
    LONG i=globals->iocache_sizeinblocks;

    // _DEBUG(("validateiocache: BMCNTO returned %ld\n", BMCNTO(ioc->dirty, 0, ioc->blocks)));

    if(bmcnto(ioc->dirty, 0, ioc->blocks) < globals->iocache_sizeinblocks/2) {
      UBYTE *data;

      /* Copying the dirty blocks to the new IOCache. */

      // _DEBUG(("validateiocache: Using new IOCache\n"));

      while(--i>=0) {
        if(bmtsto(ioc->dirty, i, 1)!=FALSE) {
          CopyMemQuick((UBYTE *)ioc->data + (i<<globals->shifts_block), (UBYTE *)globals->ioc_buffer->data + (i<<globals->shifts_block), globals->bytes_block);
        }
      }

      /* Switch the data pointers of the scratch buffer and this IOCache. */
      data=globals->ioc_buffer->data;
      globals->ioc_buffer->data=ioc->data;
      ioc->data=data;
    }
    else {

      /* Copying the newly read blocks to the existing IOCache. */

      // _DEBUG(("validateiocache: Using existing IOCache\n"));

      while(--i>=0) {
        if(bmtstz(ioc->dirty, i, 1)!=FALSE) {
          CopyMemQuick((UBYTE *)globals->ioc_buffer->data + (i<<globals->shifts_block), (UBYTE *)ioc->data + (i<<globals->shifts_block), globals->bytes_block);
        }
      }
    }

    ioc->valid[0]=0xFFFFFFFF;
    ioc->valid[1]=0xFFFFFFFF;
    ioc->valid[2]=0xFFFFFFFF;
    ioc->valid[3]=0xFFFFFFFF;
  }

  return(errorcode);
}
static LONG copybackiocache(struct IOCache *ioc) {
  LONG errorcode=0;
  LONG dirtylow, dirtyhigh;

  /* Writes out any dirty data, and resets the dirty bit.

     For extra efficiency this function will, in case of a physical
     disk access, also flush any buffers following this one, to avoid
     physical head movement. */

  // _DEBUG(("copybackiocache: ioc->block = %ld\n", ioc->block));

  while(ioc!=0 && ioc->blocks!=0 && (ioc->bits & IOC_DIRTY)!=0) {

    _DEBUG(("copybackiocache: ioc->dirty=%p (@=%08x) ioc->blocks-1=%d\n", ioc->dirty, AROS_BE2LONG(*(ULONG *)ioc->dirty), ioc->blocks-1));

    if((dirtyhigh=bmflo(ioc->dirty, ioc->blocks-1))<0) {
      _DEBUG(("copybackiocache: Say what?\n"));
      // dirtyhigh = ioc->blocks-1;

      /* IOC_DIRTY was set but no dirty bits were found; write nothing. */
      break;
    }

    dirtylow=bmffo(ioc->dirty, 4, 0);

    // _DEBUG(("copybackiocache: dirtylow = %ld, dirtyhigh = %ld, ioc->dirty = 0x%08lx\n", dirtylow, dirtyhigh, ioc->dirty[0]));

    /* dirtylow and dirtyhigh are known.  Now, to check if we can write
       all these changes in a single write, we check if all the blocks
       between dirtylow and dirtyhigh are VALID (not dirty, although
       most of them probably will be dirty). */

    if(bmffz(ioc->valid, 4, dirtylow)<dirtyhigh) {
      // _DEBUG(("copybackiocache: calling validateiocache\n"));
      if((errorcode=validateiocache(ioc, 0, ioc->blocks))!=0) {
        return(errorcode);
      }
    }

    if((errorcode=transfer(DIO_WRITE, (UBYTE *)ioc->data + (dirtylow<<globals->shifts_block), ioc->block + dirtylow, dirtyhigh - dirtylow + 1))!=0) {
      return(errorcode);
    }

    ioc->bits&=~IOC_DIRTY;

    ioc->dirty[0]=0;
    ioc->dirty[1]=0;
    ioc->dirty[2]=0;
    ioc->dirty[3]=0;

    // ioc->dirtylow=255;

    /* Also flush the cache line physically following this one, if present. */
    ioc=findiocache(ioc->block+ioc->blocks);
  }

  return(errorcode);
}
LONG flushiocache(void) {
  struct IOCache *ioc;
  LONG errorcode=0;

  /* Writes all dirty data to disk, but keeps the cached data for
     later reads.  Use this to ensure data is committed to disk
     when doing critical operations. */

  ioc=(struct IOCache *)globals->iocache_lruhead->mlh_Head;

  while(ioc->node.mln_Succ!=0) {
    if((errorcode=copybackiocache(ioc))!=0) {
      break;
    }

    ioc=(struct IOCache *)(ioc->node.mln_Succ);
  }

  // _DEBUG(("flushiocache: errorcode = %ld\n", errorcode));

  return(errorcode);
}
void invalidateiocaches(void) {
  struct IOCache *ioc;

  /* Clears all buffers in the IOCache.  This should be used BEFORE
     directly writing to the disk (for example, call this before
     ACTION_INHIBIT(TRUE)).  Before calling this function, make
     sure all pending changes have been flushed using flushiocache(). */

  ioc=(struct IOCache *)globals->iocache_lruhead->mlh_Head;

  while(ioc->node.mln_Succ!=0) {
    invalidateiocache(ioc);
    ioc=(struct IOCache *)(ioc->node.mln_Succ);
  }
}
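/* Usage sketch (not from the original source): commit and drop everything
   cached before some other party accesses the disk directly, exactly the
   call sequence the comment in invalidateiocaches() requires. */
static LONG examplepreparedirectaccess(void) {
  LONG errorcode;

  if((errorcode=flushiocache())==0) {  /* commit all dirty blocks first */
    invalidateiocaches();              /* then drop the cached contents */
  }

  return(errorcode);
}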
void copyiocachetobuffer(struct IOCache *ioc, BLCK *block, UBYTE **buffer, ULONG *blocks) {
  ULONG blockoffset=*block-ioc->block;
  ULONG blocklength=ioc->blocks-blockoffset;

  if(*blocks<blocklength) {
    blocklength=*blocks;
  }

  /* CopyMemQuick requires longword alignment. */
  if(((IPTR)(*buffer) & 0x00000003) != 0) {
    CopyMem((UBYTE *)ioc->data + (blockoffset<<globals->shifts_block), *buffer, blocklength<<globals->shifts_block);
  }
  else {
    CopyMemQuick((UBYTE *)ioc->data + (blockoffset<<globals->shifts_block), *buffer, blocklength<<globals->shifts_block);
  }

  *block+=blocklength;
  *blocks-=blocklength;
  *buffer+=blocklength<<globals->shifts_block;
}
LONG readintocache(BLCK block, struct IOCache **returned_ioc) {
  struct IOCache *ioc;
  LONG errorcode=0;

  if((ioc=locateiocache(block))==0) {
    if((errorcode=lruiocache(&ioc))==0) {
      ULONG blockstart=block & ~globals->iocache_mask;
      ULONG blocklength=globals->iocache_sizeinblocks;

      if(blockstart+blocklength>globals->blocks_total) {
        blocklength=globals->blocks_total-blockstart;
      }

      if((errorcode=transfer(DIO_READ, ioc->data, blockstart, blocklength))==0) {
        ioc->block=blockstart;
        ioc->blocks=blocklength;

        ioc->valid[0]=0xFFFFFFFF;
        ioc->valid[1]=0xFFFFFFFF;
        ioc->valid[2]=0xFFFFFFFF;
        ioc->valid[3]=0xFFFFFFFF;

        hashit(ioc);
      }
    }
  }

  *returned_ioc=ioc;

  return(errorcode);
}
LONG readonwriteintocache(BLCK block, struct IOCache **returned_ioc) {
  struct IOCache *ioc;
  LONG errorcode=0;

  /* Only does a physical read if iocache_readonwrite is TRUE */

  if((ioc=locateiocache(block))==0) {
    if((errorcode=lruiocache(&ioc))==0) {
      ULONG blockstart=block & ~globals->iocache_mask;
      ULONG blocklength=globals->iocache_sizeinblocks;

      if(blockstart+blocklength>globals->blocks_total) {
        blocklength=globals->blocks_total-blockstart;
      }

      if(globals->iocache_readonwrite==FALSE || (errorcode=transfer(DIO_READ, ioc->data, blockstart, blocklength))==0) {
        ioc->block=blockstart;
        ioc->blocks=blocklength;

        if(globals->iocache_readonwrite!=FALSE) {
          ioc->valid[0]=0xFFFFFFFF;
          ioc->valid[1]=0xFFFFFFFF;
          ioc->valid[2]=0xFFFFFFFF;
          ioc->valid[3]=0xFFFFFFFF;
        }

        hashit(ioc);
      }
    }
  }

  *returned_ioc=ioc;

  return(errorcode);
}
static LONG copybackoverlappingiocaches(BLCK block, ULONG blocks) {
  struct IOCache *ioc;
  BLCK lastblock;
  LONG errorcode=0;

  /* This function copies back any IOCaches which fall (partially) in the
     region specified by the input parameters. */

  lastblock=(block+blocks-1) & ~globals->iocache_mask;
  block=block & ~globals->iocache_mask;

  while(block<=lastblock) {  // Aug 6 1998: Changed '<' into '<='.
    if((ioc=locateiocache(block))!=0) {
      if((errorcode=copybackiocache(ioc))!=0) {
        break;
      }
    }

    block+=globals->iocache_sizeinblocks;
  }

  return(errorcode);
}
LONG readbytes(BLCK block, UBYTE *buffer, UWORD offsetinblock, UWORD bytes) {
  struct IOCache *ioc;
  LONG errorcode;

  /* This function is intended to copy data directly from an IOCache buffer
     which was read.  It can be used for small amounts of data only (1 up
     to bytes_block bytes).

     This function will fall back to reading a single block if the cache
     is disabled. */

  if((errorcode=readintocache(block, &ioc))==0 && (errorcode=validateiocache(ioc, block-ioc->block, 1))==0) {
    CopyMem((UBYTE *)ioc->data + ((block-ioc->block)<<globals->shifts_block) + offsetinblock, buffer, bytes);
  }

  return(errorcode);
}
LONG writebytes(BLCK block, UBYTE *buffer, UWORD offsetinblock, UWORD bytes) {
  struct IOCache *ioc;
  LONG errorcode;

  /* This function is intended to copy data directly into an IOCache buffer
     which was read.  It can be used for small amounts of data only (1 up
     to bytes_block bytes).

     This function will fall back to reading/modifying/writing a single
     block if the cache or copyback caching is disabled. */

  if((errorcode=readintocache(block, &ioc))==0 && (errorcode=validateiocache(ioc, block-ioc->block, 1))==0) {
    CopyMem(buffer, (UBYTE *)ioc->data + ((block-ioc->block)<<globals->shifts_block) + offsetinblock, bytes);
    if(globals->iocache_copyback==FALSE) {
      errorcode=write(block, (UBYTE *)ioc->data + ((block-ioc->block)<<globals->shifts_block), 1);
    }
    else {
      bmset(ioc->dirty, 4, block-ioc->block, 1);
      ioc->bits|=IOC_DIRTY;
    }
  }

  return(errorcode);
}
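/* Usage sketch (not from the original source; the block number, offset and
   field are made up): readbytes()/writebytes() let a caller patch a few
   bytes inside a block without handling whole blocks itself. */
static LONG examplepatchfield(void) {
  ULONG field;
  LONG errorcode;

  /* Read 4 bytes at byte offset 32 of block 100, modify them, write them back. */
  if((errorcode=readbytes(100, (UBYTE *)&field, 32, sizeof(field)))==0) {
    field++;
    errorcode=writebytes(100, (UBYTE *)&field, 32, sizeof(field));
  }

  return(errorcode);
}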
LONG read(BLCK block, UBYTE *buffer, ULONG blocks) {
  struct IOCache *ioc;
  LONG errorcode=0;

  /* The readahead caching system works simply: if the data request is smaller
     than or equal to half the line size, then we first try to locate the data
     in the cache.  If it is available, it is copied and returned.  If the data
     isn't available then it is read into the cache, and copied afterwards.

     Large requests are processed separately and don't go through the cache.
     The idea is that large requests are quite fast when loaded from the HD
     anyway.  Also, to be able to properly speed up even large requests you'd
     need a substantially larger cache, which isn't what we want here. */

  if(globals->iocache_lines!=0 && blocks<=globals->iocache_sizeinblocks>>1) {  // ****** // this could be faster: even when the request is too large, the data might still be in the cache!

    while(errorcode==0 && blocks!=0) {
      if((errorcode=readintocache(block, &ioc))==0) {
        ULONG blockoffset=block-ioc->block;
        ULONG blocklength=ioc->blocks-blockoffset;

        if(blocks<blocklength) {
          blocklength=blocks;
        }

        if((errorcode=validateiocache(ioc, blockoffset, blocklength))!=0) {
          break;
        }

        /* CopyMemQuick requires longword alignment. */
        if(((IPTR)buffer & 0x00000003) != 0) {
          CopyMem((UBYTE *)ioc->data + (blockoffset<<globals->shifts_block), buffer, blocklength<<globals->shifts_block);
        }
        else {
          CopyMemQuick((UBYTE *)ioc->data + (blockoffset<<globals->shifts_block), buffer, blocklength<<globals->shifts_block);
        }

        block+=blocklength;
        blocks-=blocklength;
        buffer+=blocklength<<globals->shifts_block;
      }
    }
  }
  else {
    if((errorcode=copybackoverlappingiocaches(block, blocks))==0) {
      errorcode=transfer(DIO_READ, buffer, block, blocks);
    }
  }

  return(errorcode);
}
void writethroughoverlappingiocaches(BLCK block, ULONG blocks, UBYTE *buffer) {
  struct IOCache *ioc;
  BLCK firstblock, lastblock;

  /* This function copies data from the buffer to any IOCaches which fall (partially)
     in the region specified by the input parameters. */

  firstblock=block & ~globals->iocache_mask;
  lastblock=(block+blocks-1) & ~globals->iocache_mask;

  while(firstblock<=lastblock) {
    if((ioc=locateiocache(firstblock))!=0) {
      UBYTE *src, *dst;
      ULONG offsetinline, startinline;
      ULONG maxinline, overlappedblocks;

      // _DEBUG(("IOCACHE: found overlapping cache (%ld-%ld) for block %ld of %ld blocks\n",ioc->block,ioc->block+ioc->blocks-1,block,blocks));

      /* |-------|     |-----|       |-------|      |---------|
           |=======|   |=========|   |=======|        |=====|

         block  blocks   firstblock  offsetinline   overlappedblocks
         -----------------------------------------------------------
           1      20          0           1         7   21 -( 0 + 1) = 20
                              8           0         8   21 -( 8 + 0) = 13
                             16           0         5   21 -(16 + 0) =  5
         -----------------------------------------------------------
           1       3          0           1         3    4 -( 0 + 1) =  3
          20       3         16           4         3   23 -(16 + 4) =  3
         ----------------------------------------------------------- */

      offsetinline=firstblock<block ? block-firstblock : 0;
      startinline=firstblock+offsetinline;
      maxinline=ioc->blocks-offsetinline;
      overlappedblocks=block+blocks-startinline;

      if(overlappedblocks>maxinline) {
        overlappedblocks=maxinline;
      }

      /* offsetinline and overlappedblocks now describe the region to be
         overwritten in this IOCache. */

      if(globals->iocache_copyback!=FALSE && (ioc->bits & IOC_DIRTY)!=0) {

        /* Copyback mode is active!  We need to unmark any dirty blocks
           which are now overwritten. */

        bmclr(ioc->dirty, 4, offsetinline, overlappedblocks);
      }

      bmset(ioc->valid, 4, offsetinline, overlappedblocks);

      src=buffer + ((startinline-block)<<globals->shifts_block);
      dst=(UBYTE *)ioc->data + (offsetinline<<globals->shifts_block);

      /* CopyMemQuick requires longword alignment. */
      if(((IPTR)buffer & 0x00000003) != 0) {
        CopyMem(src, dst, overlappedblocks<<globals->shifts_block);
      }
      else {
        CopyMemQuick(src, dst, overlappedblocks<<globals->shifts_block);
      }
    }

    firstblock+=globals->iocache_sizeinblocks;
  }
}
LONG writethrough(BLCK block, UBYTE *buffer, ULONG blocks) {

  /* This function writes data to disk.  It writes through existing
     cache buffers, but doesn't cause reads if the cache is in copyback
     mode. */

  if(globals->iocache_lines!=0) {
    writethroughoverlappingiocaches(block, blocks, buffer);
  }

  return(transfer(DIO_WRITE, buffer, block, blocks));
}
LONG write(BLCK block, UBYTE *buffer, ULONG blocks) {
  if(globals->iocache_lines!=0) {
    ULONG maxblocks=globals->iocache_sizeinblocks>>2;

    if(globals->iocache_copyback!=FALSE && blocks<=maxblocks) {
      struct IOCache *ioc;
      struct IOCache *ioc2;
      LONG errorcode;

      if((errorcode=readonwriteintocache(block, &ioc))==0) {  /* A trick which works because the cache system consists of at least 4 lines. */
        if((errorcode=readonwriteintocache(block+blocks-1, &ioc2))==0) {
          WORD offsetinline=block-ioc->block;

          writethroughoverlappingiocaches(block, blocks, buffer);  /* This function kills dirty blocks if needed. */

          ioc->bits|=IOC_DIRTY;

          if(ioc==ioc2) {
            /* The write fits within a single cache line. */
            bmset(ioc->dirty, 4, offsetinline, blocks);
            bmset(ioc->valid, 4, offsetinline, blocks);
          }
          else {
            /* The write spans two adjacent cache lines. */
            LONG blocks2=globals->iocache_sizeinblocks-offsetinline;

            ioc2->bits|=IOC_DIRTY;

            bmset(ioc->dirty, 4, offsetinline, blocks2);
            bmset(ioc->valid, 4, offsetinline, blocks2);
            bmset(ioc2->dirty, 4, 0, blocks-blocks2);
            bmset(ioc2->valid, 4, 0, blocks-blocks2);
          }
        }
      }

      return(errorcode);
    }
  }

  return(writethrough(block, buffer, blocks));
}
LONG getbuffer(UBYTE **tempbuffer, ULONG *maxblocks) {
  struct IOCache *ioc;
  LONG errorcode;

  if((errorcode=lruiocache(&ioc))!=0) {  /** Might fail!!  Make sure it never does! */
    return(errorcode);
  }

  *tempbuffer=ioc->data;
  *maxblocks=globals->iocache_sizeinblocks;

  return(0);
}
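/* Usage sketch (not from the original source): getbuffer() hands out the
   data area of the least recently used cache line as temporary scratch
   space, e.g. for assembling a block before writing it out. */
static LONG exampleusegetbuffer(BLCK block) {
  UBYTE *temp;
  ULONG maxblocks;
  LONG errorcode;

  if((errorcode=getbuffer(&temp, &maxblocks))==0) {
    ULONG i;

    /* temp has room for maxblocks blocks (maxblocks << shifts_block bytes). */
    for(i=0; i<globals->bytes_block; i++) {
      temp[i]=0;  /* build one block of data... */
    }

    errorcode=writethrough(block, temp, 1);  /* ...and write it out */
  }

  return(errorcode);
}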