/*
	SuperCollider real time audio synthesis system
	Copyright (c) 2002 James McCartney. All rights reserved.
	http://www.audiosynth.com

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "SC_AllocPool.h"
#include "SC_BoundsMacros.h"
#include <stdexcept>	// std::runtime_error
#include <cstring>	// memcpy, memmove
#include <cstdlib>	// malloc, realloc, free
#include <cassert>	// assert
/* Requests are `small' if both the corresponding and the next bin are small. */
#ifdef DEBUG_ALLOC_POOL /* guard name assumed: enables the pool consistency checks */
#define check_pool() DoCheckPool()
#define check_free_chunk(P) DoCheckFreeChunk(P)
#define check_inuse_chunk(P) DoCheckInUseChunk(P)
#define check_chunk(P) DoCheckChunk(P)
#define check_malloced_chunk(P,N) DoCheckAllocedChunk(P,N)
#define garbage_fill(P) DoGarbageFill(P)
#else
#define check_pool()
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#define garbage_fill(P)
#endif
#define aligned_OK(m) ((((unsigned long)(m)) & kAlignMask) == 0)
void* allocmem(AllocPool *pool, int32 size);
void* allocmem(AllocPool *pool, int32 size)
{
	return pool->Alloc(size);
}

void* reallocmem(AllocPool *pool, void* ptr, int32 size);
void* reallocmem(AllocPool *pool, void* ptr, int32 size)
{
	return pool->Realloc(ptr, size);
}

void freemem(AllocPool *pool, void* ptr);
void freemem(AllocPool *pool, void* ptr)
{
	pool->Free(ptr);
}
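#if 0
/*
	Usage sketch only (not part of the original sources). It assumes that
	NewAreaFunc and FreeAreaFunc are plain malloc/free-compatible function
	pointers, which matches how mAllocArea and mFreeArea are invoked below;
	the area and request sizes are arbitrary.
*/
static void* sketchNewArea(size_t size) { return malloc(size); }
static void sketchFreeArea(void* ptr) { free(ptr); }

static void sketchUsage()
{
	AllocPool pool(sketchNewArea, sketchFreeArea, 1 << 16, 1 << 16);
	void* p = allocmem(&pool, 128);		// carve a chunk out of the initial area
	p = reallocmem(&pool, p, 256);		// may grow in place if a neighboring chunk is free
	freemem(&pool, p);			// return the chunk to its size bin
}
#endif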
void AllocPool::InitAlloc()
	if (mAreaInitSize == 0) return;

	/* alloc initial area */
	NewArea(mAreaInitSize);

	AllocAreaPtr area = mAreas;
	AllocChunkPtr chunk = &area->mChunk;
void AllocPool::InitBins()
	for (int i=0; i<kNumAllocBins; ++i) {

	for (int i=0; i<4; ++i) {
AllocPool::AllocPool(NewAreaFunc inAllocArea, FreeAreaFunc inFreeArea,
		size_t inAreaInitSize, size_t inAreaMoreSize)
	mAreaInitSize = inAreaInitSize;
	mAreaMoreSize = inAreaMoreSize;
	mAllocArea = inAllocArea;
	mFreeArea = inFreeArea;
AllocPool::~AllocPool()

void AllocPool::FreeAll()
	AllocAreaPtr area = mAreas;

	AllocAreaPtr firstArea = area;
		AllocAreaPtr nextarea = area->mNext;
		(mFreeArea)(area->mUnalignedPointerToThis);
	} while (area != firstArea);
void AllocPool::FreeAllInternal()
	AllocAreaPtr area = mAreas;

	AllocAreaPtr firstArea = area;
		AllocAreaPtr nextarea = area->mNext;
		size_t size = area->mSize;
		AllocChunkPtr chunk = &area->mChunk;
		chunk->SetSizeFree(size);
		chunk->SetNeighborsInUse(size);
	} while (area != firstArea);
void AllocPool::Reinit()

void AllocPool::Free(void *inPtr)
#ifdef DISABLE_MEMORY_POOLS

	if (inPtr == 0) return; /* free(0) has no effect */

	AllocChunkPtr chunk = MemToChunk(inPtr);

	check_inuse_chunk(chunk);

	size_t size = chunk->Size();

	if (!chunk->PrevInUse()) /* consolidate backward */
		size_t prevSize = chunk->PrevSize();
		chunk = chunk->ChunkAtOffset(0L-prevSize);

	AllocChunkPtr next = chunk->ChunkAtOffset(size);
	if (!next->InUse()) /* consolidate forward */
		size += next->Size();

	chunk->SetSizeFree(size);
	if (mAreaMoreSize && chunk->IsArea()) {
		// whole area is free
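#if 0
/*
	Illustration only; the real AllocChunk layout lives in SC_AllocPool.h.
	The consolidation in Free() above relies on boundary tags: each chunk
	records its own size and (while the preceding chunk is free) the size of
	that neighbor, so a freed chunk can reach both neighbors in O(1) and
	merge with whichever of them is also free.
*/
struct SketchChunk {
	size_t mPrevSize;	// size of the preceding chunk, valid while that chunk is free
	size_t mSize;		// this chunk's own size plus its in-use state
};
#endif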
AllocAreaPtr AllocPool::NewArea(size_t inAreaSize)
	void *ptr = (AllocAreaPtr)(mAllocArea)(inAreaSize + kAreaOverhead);

		throw std::runtime_error("Could not allocate new area");

	// AllocAreaPtr area = (AllocAreaPtr)((unsigned long)ptr & ~kAlignMask);
	AllocAreaPtr area = (AllocAreaPtr)(((unsigned long)ptr + kAlignMask) & ~kAlignMask);
	assert((area >= ptr) && ((void*)((unsigned long)area & ~kAlignMask) == area));

	area->mUnalignedPointerToThis = ptr;

	area->mNext = mAreas;
	area->mPrev = mAreas->mPrev;
	area->mNext->mPrev = area;
	area->mPrev->mNext = area;

	area->mSize = inAreaSize;
	area->mChunk.BeEmpty();
	area->mChunk.SetNeighborsInUse(inAreaSize);
	area->mChunk.SetSizeFree(inAreaSize);
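#if 0
/*
	Worked example of the alignment rounding used in NewArea() above, with a
	hypothetical 64-byte alignment (mask 0x3F standing in for kAlignMask).
	SketchAlignUp(0x10003) returns 0x10040, the next aligned boundary at or
	after the input, which is exactly the property the assert checks.
*/
static unsigned long SketchAlignUp(unsigned long addr)
{
	const unsigned long mask = 0x3F;	// hypothetical; the real value is kAlignMask
	return (addr + mask) & ~mask;
}
#endif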
void AllocPool::FreeArea(AllocChunkPtr chunk)
	AllocAreaPtr area = (AllocAreaPtr)((char*)chunk - sizeof(AllocAreaHdr));

	if (area->mNext == area) {

	mAreas = area->mPrev->mNext = area->mNext;
	area->mNext->mPrev = area->mPrev;

	(mFreeArea)(area->mUnalignedPointerToThis);
size_t AllocPool::TotalFree()
	for (int i=0; i<kNumAllocBins; ++i) {
		AllocChunkPtr bin = mBins + i;
		if (bin->Prev() != bin) {
			for (AllocChunkPtr candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
				total += candidate->Size();
size_t AllocPool::LargestFreeChunk()
	for (int i=3; i>=0; --i) {

	int binBits = (int)mBinBlocks[word];
	int bitPosition = NUMBITS(binBits) - 1;
	int index = (word << 5) + bitPosition;
	AllocChunkPtr bin = mBins + index;
	//postbuf("** %p %p %p %p\n", mBinBlocks[0], mBinBlocks[1], mBinBlocks[2], mBinBlocks[3]);
	//postbuf("%d %d %d %p %p %p\n", word, bitPosition, index, binBits, bin->Prev(), bin->Next());

	AllocChunkPtr candidate;

	for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
		size_t candidate_size = candidate->Size();
		maxsize = sc_max(maxsize, candidate_size);
		//postbuf("  %d %d\n", maxsize, candidate_size);

	/*for (int i=0; i<kNumAllocBins; ++i) {
		if (bin->Prev() != bin) {
			postbuf("* %d %d\n", i, bin->Prev()->Size());
void* AllocPool::Alloc(size_t inReqSize)
#ifdef DISABLE_MEMORY_POOLS
	return malloc(inReqSize);

	// OK, it has a lot of gotos, but these remove a whole lot of common code
	// that was obfuscating the original version of this function.
	// So here I am choosing the OnceAndOnlyOnce principle over the caveats on gotos.
	// The gotos only jump forward and only to the exit paths of the function.

	// The old bin block scheme has been replaced by 4 x 32 bit words so that each bin has a bit
	// and the next bin is found using a count leading zeroes instruction. Much faster.
	// Also now each bin's flag can be kept accurate. This simplifies the searching code quite a bit.
	// (A portable sketch of this bit-per-bin search follows this function.)

	// Also, fwiw, I changed 'victim' in the original code to 'candidate'. 'victim' just bothered me.
	AllocChunkPtr candidate;	/* inspected/selected chunk */
	size_t candidate_size;		/* its size */
	AllocChunkPtr remainder;	/* remainder from a split */
	int32 remainder_size;		/* its size */

	size_t size = RequestToSize(inReqSize);
	int index = BinIndex(size);

	AllocChunkPtr bin = mBins + index;
	/* Check for exact match in a bin */
	if (index < kMaxSmallBin) { /* Faster version for small requests */
		/* No traversal or size check necessary for small bins. */
		candidate = bin->Prev();

		/* Also scan the next one, since it would have a remainder < kMinAllocSize */
		if (candidate == bin) candidate = (++bin)->Prev();
		if (candidate != bin) {
			candidate_size = candidate->Size();
			goto found_exact_fit;

		index += 2; /* Set for bin scan below. We've already scanned 2 bins. */

		for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
			candidate_size = candidate->Size();
			remainder_size = (int)(candidate_size - size);
			if (remainder_size >= (int32)kMinAllocSize) { /* too big */
				--index; /* adjust to rescan below after checking last remainder */
			} else if (remainder_size >= 0) { /* exact fit */
				goto found_exact_fit;
	for (; (index = NextFullBin(index)) >= 0; ++index) {

		/* Find and use first big enough chunk ... */
		for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
			candidate_size = candidate->Size();
			remainder_size = (int)(candidate_size - size);
			if (remainder_size >= (int32)kMinAllocSize) { /* split */
				UnlinkFree(candidate);
				goto found_bigger_fit;
			} else if (remainder_size >= 0) goto found_exact_fit;
	if (mAreaMoreSize == 0) { /* pool has a non-growable area */
		if (mAreas != NULL /* fixed size area exhausted */
			|| size > mAreaInitSize) /* too big anyway */

		areaSize = mAreaInitSize;

	if (size > mAreaMoreSize) {

	areaSize = mAreaMoreSize;

	//ipostbuf("alloc failed. size: %d\n", inReqSize);
	throw std::runtime_error("alloc failed, increase server's memory allocation (e.g. via ServerOptions)");
	//ipostbuf("whole_new_area\n");
	area = NewArea(areaSize);
	candidate = &area->mChunk;
	candidate_size = candidate->Size();

	//ipostbuf("split_new_area\n");
	area = NewArea(areaSize);
	candidate = &area->mChunk;
	candidate_size = candidate->Size();
	remainder_size = (int)(areaSize - size);

	//ipostbuf("found_bigger_fit\n");
	remainder = candidate->ChunkAtOffset(size);
	remainder->SetSizeFree(remainder_size);
	candidate_size -= remainder_size;

	UnlinkFree(candidate);

	candidate->SetSizeInUse(candidate_size);
	check_malloced_chunk(candidate, candidate_size);
	garbage_fill(candidate);
	return candidate->ToPtr();
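#if 0
/*
	Portable sketch of the bit-per-bin search described in the comments at the
	top of Alloc(). Bin i's flag is assumed to be bit (i & 31) of word (i >> 5),
	matching the index arithmetic in LargestFreeChunk(). The real NextFullBin()
	is defined elsewhere and, per the comment above, uses a count-leading-zeroes
	style instruction rather than this bit loop; the names here are illustrative.
*/
static int SketchNextFullBin(const unsigned int binBlocks[4], int index)
{
	if (index < 0) index = 0;
	for (int word = index >> 5; word < 4; ++word) {
		unsigned int bits = binBlocks[word];
		if (word == (index >> 5))
			bits &= ~0u << (index & 31);	// ignore bins below 'index' in the first word
		if (bits) {
			int bit = 0;
			while (!(bits & 1u)) { bits >>= 1; ++bit; }	// lowest set bit
			return (word << 5) + bit;	// first non-empty bin at or above 'index'
		}
	}
	return -1;	// every remaining bin is empty
}
#endif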
void* AllocPool::Realloc(void* inPtr, size_t inReqSize)
#ifdef DISABLE_MEMORY_POOLS
	return realloc(inPtr, inReqSize);

	/* realloc of null is supposed to be same as malloc */
	if (inPtr == 0) return Alloc(inReqSize);

	AllocChunkPtr oldChunk = MemToChunk(inPtr);
	AllocChunkPtr newChunk = oldChunk;
	size_t oldsize = oldChunk->Size();
	size_t newsize = oldsize;
	size_t size = RequestToSize(inReqSize);
	size_t nextsize, prevsize;

	check_inuse_chunk(oldChunk);
	if (oldsize < size) {
		/* Try expanding forward */
		AllocChunkPtr next = oldChunk->NextChunk();
		if (!next->InUse()) {
			nextsize = next->Size();
			/* Forward into next chunk */
			if (nextsize + newsize >= size) {
		/* Try shifting backwards. */
		prev = oldChunk->PrevChunk();
		if (!prev->InUse()) {
			prevsize = prev->Size();

			/* try forward + backward first to save a later consolidation */

				/* into next chunk */
				if (nextsize + prevsize + newsize >= size) {
					newsize += nextsize + prevsize;

			if (prev != 0 && prevsize + newsize >= size) {
		outPtr = Alloc(inReqSize);

			//ipostbuf("realloc failed. size: %d\n", inReqSize);
			throw std::runtime_error("realloc failed, increase server's memory allocation (e.g. via ServerOptions)");

		/* Otherwise copy, free, and exit */
		memcpy(outPtr, inPtr, oldsize - sizeof(AllocChunk));
split: /* split off extra room in old or expanded chunk */
	if (newsize - size >= kMinAllocSize) { /* split off remainder */
		size_t remainder_size = newsize - size;
		AllocChunkPtr remainder = newChunk->ChunkAtOffset(size);
		remainder->SetSizeInUse(remainder_size);
		newChunk->SetSizeInUse(size);
		Free(remainder->ToPtr()); /* let free() deal with it */

		newChunk->SetSizeInUse(newsize);

	outPtr = newChunk->ToPtr();

		memmove(outPtr, inPtr, oldsize - sizeof(AllocChunk));

	check_inuse_chunk(newChunk);

	garbage_fill(newChunk);
void AllocPool::LinkFree(AllocChunkPtr inChunk)
	size_t size = inChunk->Size();
	int index = BinIndex(size);

	AllocChunkPtr bin = mBins + index;

	if (index < kNumSmallBins || bin->IsEmpty()) {
		inChunk->InsertAfter(bin);

		AllocChunkPtr link = bin->Next();
		while (link != bin && size < link->Size()) link = link->Next();
		inChunk->InsertBefore(link);
void AllocPool::DoCheckArea(AllocAreaPtr area)
	assert(area->mChunk.PrevInUse());

	AllocChunkPtr p = &area->mChunk;
	while (p->mSize != kChunkInUse) {

		DoCheckInUseChunk(p);

void AllocPool::DoCheckBin(AllocChunkPtr bin, long index)
	AllocChunkPtr p = bin->Next();

		assert(BinIndex(p->Size()) == index);
void AllocPool::DoCheckPool()
	AllocAreaPtr area = mAreas;

		AllocAreaPtr nextarea = area->mNext;

	} while (area != mAreas);

	for (int i=0; i<kNumAllocBins; ++i) {
		AllocChunkPtr b = mBins + i;
void AllocPool::DoCheckChunk(AllocChunkPtr p)
	size_t size = p->Size();
	//size_t maxsize = mAreaInitSize > mAreaMoreSize ? mAreaInitSize : mAreaMoreSize;
	// assert(size < maxsize);

	AllocChunkPtr next = p->ChunkAtOffset(size);

	assert(p->mSize == next->mPrevSize);
void AllocPool::DoCheckFreeChunk(AllocChunkPtr p)
	size_t size = p->Size();

	AllocChunkPtr next = p->ChunkAtOffset(size);

	/* Check whether it claims to be free ... */

	/* Unless an end marker, must have OK fields */
	if (size >= kMinAllocSize)

		assert((size & kAlignMask) == 0);
		assert(aligned_OK(p->ToPtr()));
		/* ... and is fully consolidated */
		assert(p->PrevInUse());
		assert(next->InUse());

		/* ... and has minimally sane links */
		assert(p->Next()->Prev() == p);
		assert(p->Prev()->Next() == p);

	else /* end markers are always of size 0 */
void AllocPool::DoCheckInUseChunk(AllocChunkPtr p)
	size_t size = p->Size();
	AllocChunkPtr next = p->NextChunk();

	/* Check whether it claims to be in use ... */

	/* ... and is surrounded by OK chunks.
		Since more things can be checked with free chunks than inuse ones,
		if an inuse chunk borders them and debug is on, it's worth doing them.
	*/
	if (!p->PrevInUse()) {
		size_t prevsize = p->PrevSize();

		AllocChunkPtr prv = p->PrevChunk();
		assert(prv->NextChunk() == p);
		DoCheckFreeChunk(prv);

	if (!p->ChunkAtOffset(size)->InUse()) {
		DoCheckFreeChunk(next);
void AllocPool::DoCheckAllocedChunk(AllocChunkPtr p, size_t s)
	size_t size = p->Size();
	long room = size - s;

	DoCheckInUseChunk(p);

	assert(size >= kMinAllocSize);
	assert((size & kAlignMask) == 0);

	assert(room < kMinAllocSize);

	/* ... and alignment */
	assert(aligned_OK(p->ToPtr()));

	/* ... and was allocated at front of an available chunk */
	assert(p->PrevInUse()); // huh?? - jmc
void AllocPool::DoGarbageFill(AllocChunkPtr p)
	long size = (p->Size() - sizeof(AllocChunk));
	DoGarbageFill(p, size);

void AllocPool::DoGarbageFill(AllocChunkPtr p, long size)
	size /= sizeof(long);
	long *ptr = (long*)p->ToPtr();
	for (int i=0; i<size; ++i) {
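#if 0
/*
	Illustration only: the idea behind DoGarbageFill is to stamp a recognizable
	word pattern over a chunk's payload so that reads through stale pointers
	stand out in a debugger. The marker value below is invented for this sketch;
	it is not the value the implementation writes.
*/
static void sketchGarbageFill(long* words, long nwords)
{
	for (long i = 0; i < nwords; ++i) words[i] = 0x55AA55AAL;	// hypothetical marker
}
#endif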