1 ///////////////////////////////////////////////////////////////////////////////
2 // $Source: x:/prj/tech/libsrc/lgalloc/RCS/poolimp.cpp $
4 // $Date: 1997/08/14 12:22:17 $
7 // Implementation of pools
21 #pragma code_seg("lgalloc")
23 ///////////////////////////////////////////////////////////////////////////////
// Core allocator tuning: the virtual reservation is committed in 64 KB
// chunks, out of a 64 MB maximum address-space reservation.
29 #define kPoolCoreGrowSize (1024 * 64)
30 #define kPoolCoreMaxSize (1024 * 1024 * 64)
// Page-level alloc/free entry points for the core stack.
35 static void * AllocPage();
36 static void FreePage(void *);
// Bump-pointer state for the core stack:
//   gm_pCoreStack      — next page to hand out
//   gm_pCoreStackLimit — end of the currently committed region
//   gm_nTotalCoreBytes — running total of bytes handed out (see AllocPage)
// NOTE(review): these read as in-class static member declarations of
// cPoolCore; the enclosing class header is elided from this view — confirm.
39 static void * gm_pCoreStack
;
40 static void * gm_pCoreStackLimit
;
43 static ulong gm_nTotalCoreBytes
;
// Out-of-class definitions of the cPoolCore statics (zero-initialized
// as file-scope statics).
48 void * cPoolCore::gm_pCoreStack
;
49 void * cPoolCore::gm_pCoreStackLimit
;
51 ulong
cPoolCore::gm_nTotalCoreBytes
;
// cPoolCore::AllocPage
//
// Hands out one kPageSize page of committed memory from the "core
// stack": one large MEM_RESERVE region that is committed upward in
// kPoolCoreGrowSize chunks as pages are consumed.
// NOTE(review): this view is elided — the first-call guard, the loop
// braces, and the final return (presumably of pReturn) are not visible;
// comments below describe only what the visible fragments establish.
54 void * cPoolCore::AllocPage()
57 // MessageBox(NULL, "Only even allocations?!", NULL, MB_OK);
// One-time setup (presumably under an elided "not yet reserved" test):
// reserve the whole kPoolCoreMaxSize address range without committing
// it, then commit the first kPoolCoreGrowSize bytes.
61 gm_pCoreStack
= VirtualAlloc(NULL
, kPoolCoreMaxSize
, MEM_RESERVE
, PAGE_READWRITE
);
62 AssertMsg(gm_pCoreStack
, "VirtualAlloc failed!");
63 VirtualAlloc(gm_pCoreStack
, kPoolCoreGrowSize
, MEM_COMMIT
, PAGE_READWRITE
);
64 gm_pCoreStackLimit
= (uchar
*)gm_pCoreStack
+ kPoolCoreGrowSize
;
// Bump the stack pointer by one page; pReturn is the page handed out.
67 void * pReturn
= gm_pCoreStack
;
68 gm_pCoreStack
= (uchar
*)gm_pCoreStack
+ kPageSize
;
// If the bump ran past the committed region, commit another
// kPoolCoreGrowSize chunk until the limit again covers the stack top.
70 while (gm_pCoreStack
> gm_pCoreStackLimit
)
72 if (VirtualAlloc(gm_pCoreStackLimit
, kPoolCoreGrowSize
, MEM_COMMIT
, PAGE_READWRITE
))
74 gm_pCoreStackLimit
= (uchar
*)gm_pCoreStackLimit
+ kPoolCoreGrowSize
;
// A failed commit is fatal: the reservation cannot grow further.
78 CriticalMsg("VirtualAlloc failed!");
// Account for the page just consumed.
84 gm_nTotalCoreBytes
+= kPageSize
;
91 ///////////////////////////////////////////////////////////////////////////////
// Pool block layout.
// sFreePoolPart (its struct header is elided from this view): the link
// a block carries while it sits on the free list.
97 sPoolBlock
* pNextFree
;
// Depth of the captured call stack stored per tracked allocation.
102 const nStackLevels
= 8;
// Per-allocation tracking data: captured stack plus a doubly linked
// list of live allocations (pNextAlloc/pPrevAlloc — see Alloc/Free).
104 struct sAllocPoolPart
106 void *Stack
[nStackLevels
];
107 sPoolBlock
*pNextAlloc
;
108 sPoolBlock
*pPrevAlloc
;
// Two alternative sPoolBlock definitions — presumably selected by an
// elided TRACK_ALLOCS/HEAP_CHECK conditional (confirm): with tracking
// it derives from both parts, without it from sFreePoolPart alone.
111 struct sPoolBlock
: sAllocPoolPart
,
114 struct sPoolBlock
: sFreePoolPart
// RealElemSize: bytes actually consumed per element — client size plus
// the tracking header when tracking is compiled in. The #if/#else
// around these two competing #defines is elided from this view.
120 #define RealElemSize(size) ((size) + sizeof(sAllocPoolPart))
122 #define RealElemSize(size) size
// Convert between the pool's internal block pointer and the pointer
// handed to the client; in this configuration the two coincide (the
// free-list link storage is reused as client storage).
126 #define PoolItemToClient(p) ((sFreePoolPart *)(p))
127 #define ClientToPoolItem(p) (((sPoolBlock *)((sFreePoolPart *)(p))))
// sPoolDumper's destructor dumps all pools; the file-scope instance
// below makes that happen during static destruction at program exit.
135 sPoolDumper::~sPoolDumper()
137 cPoolAllocator::DumpPools();
// File-scope instance whose destruction triggers the dump.
140 sPoolDumper PoolDumper
;
143 ///////////////////////////////////////////////////////////////////////////////
// Head of the global singly linked list of all pool allocators,
// threaded through m_pNextPool (see Init and DumpPools).
145 cPoolAllocator
* cPoolAllocator::m_pPools
= 0;
147 ///////////////////////////////////////
// cPoolAllocator::Init
//
// Configure this pool for fixed-size elements of elemSize bytes and
// link it onto the global pool list. NOTE(review): elided lines
// (155-166, 171, 173-175) presumably zero the remaining members and
// store this pool into m_pPools — confirm against the full file.
152 void cPoolAllocator::Init(size_t elemSize
)
154 m_nElementSize
= elemSize
;
// An element must be at least large enough to hold the free-list link.
167 if (m_nElementSize
< sizeof(sFreePoolPart
))
168 m_nElementSize
= sizeof(sFreePoolPart
);
// How many (possibly header-augmented) elements fit in one page.
170 m_nBlockingFactor
= kPageSize
/ RealElemSize(m_nElementSize
);
// Chain this pool in front of the current global list head.
172 m_pNextPool
= m_pPools
;
176 ///////////////////////////////////////
178 // Refill an empty freelist
// cPoolAllocator::ThreadNewBlock
//
// Refill an empty free list: take one fresh page from the core
// allocator and thread all m_nBlockingFactor elements in it onto
// m_pFreeList, walking from the last element in the page back to the
// first. NOTE(review): the loop construct enclosing lines 205-212 is
// elided from this view.
181 void cPoolAllocator::ThreadNewBlock()
183 DebugMsg1("Getting new block of %d", kPageSize
);
// Refilling a non-empty list would leak the remaining free blocks.
185 AssertMsg(!m_pFreeList
, "ThreadNew called when not empty");
187 // First get a new batch ...
188 m_pFreeList
= (sPoolBlock
*)(PoolCoreAllocPage());
193 DebugMsg2("Threading New block: BlockSize = %u, elemSize = %u", kPageSize
, RealElemSize(m_nElementSize
));
199 // ... Then start threading it, starting with the last element ...
// p starts at the last element slot in the page.
200 sPoolBlock
*p
= (sPoolBlock
*)((char *)(m_pFreeList
) + (m_nBlockingFactor
- 1) * RealElemSize(m_nElementSize
));
// pPrev is the previously threaded element; 0 terminates the list.
202 sPoolBlock
*pPrev
= 0;
205 p
->pNextFree
= pPrev
;
207 // ... and work back to the first ...
// Stop once the first element of the page has been threaded.
208 if (p
== m_pFreeList
)
// Step back one element slot.
212 p
= (sPoolBlock
*)((char *)(p
) - RealElemSize(m_nElementSize
));
216 ///////////////////////////////////////
218 // Allocate one item from the pool
// cPoolAllocator::Alloc
//
// Pop one element off the free list and return a client pointer to it.
// NOTE(review): the refill (ThreadNewBlock) call and the out-of-memory
// check mentioned by the comments below are on elided lines (224-232);
// the #if guards around the debug scans are also elided — confirm.
221 void *cPoolAllocator::Alloc()
223 // Refill the free list if needed
228 // Check for out-of-memory
// Take the head of the free list.
233 sPoolBlock
*p
= m_pFreeList
;
234 DebugMsg1("Alloc'd item @%#p", p
);
236 m_pFreeList
= m_pFreeList
->pNextFree
;
239 // Check that the new node isn't still on the list somehow.
// Debug scan of the remaining free list: catch uninitialized-memory
// links (0xcdcdcdcd is the MSVC debug-heap fill pattern) and verify
// the popped node no longer appears on the list.
241 for (sPoolBlock
* p1
= m_pFreeList
; p1
; p1
= p1
->pNextFree
)
244 Assert_((void *)(p1
->pNextFree
) != (void *)0xcdcdcdcd);
245 AssertMsg(p1
!= p
, "Rethreading already freed");
// Track the high-water mark of simultaneously live allocations.
253 if (m_nAllocs
- m_nFrees
> m_nMaxTakes
)
254 m_nMaxTakes
= m_nAllocs
- m_nFrees
;
256 // cross link to next element
// Insert p at the head of the doubly linked live-allocation list.
258 m_pAllocList
->pPrevAlloc
= p
;
260 p
->pNextAlloc
= m_pAllocList
;
262 // cross link to head
266 // FillStackArray(3, nStackLevels, p->Stack);
// Hand the caller the client-visible pointer for this block.
269 return PoolItemToClient(p
);
272 ///////////////////////////////////////
274 // Put memory back on the free chain
// cPoolAllocator::Free
//
// Return a client pointer to the free list. Under TRACK_ALLOCS /
// HEAP_CHECK it also unlinks the block from the live-allocation list
// and poisons its links. NOTE(review): the matching #endif lines,
// the null-checks around the unlink, and the m_nFrees/m_nInUse counter
// updates are elided from this view — confirm against the full file.
277 void cPoolAllocator::Free(void *p
)
279 DebugMsg1("Returning item 0x%x to freelist", p
);
281 #if defined(TRACK_ALLOCS) || defined(HEAP_CHECK)
// Recover the pool's internal block pointer from the client pointer.
282 sPoolBlock
*fp
= ClientToPoolItem(p
);
285 // Cross link next element to previous element
287 fp
->pNextAlloc
->pPrevAlloc
= fp
->pPrevAlloc
;
// And the previous element to the next.
290 fp
->pPrevAlloc
->pNextAlloc
= fp
->pNextAlloc
;
// Head removal: advance the live-allocation list head.
292 m_pAllocList
= fp
->pNextAlloc
;
// Poison the links so a stale use of this block is caught fast.
294 fp
->pPrevAlloc
= fp
->pNextAlloc
= (sPoolBlock
*)((void *) -1);
// Catch double-free / unbalanced Free calls.
296 AssertMsg(m_nInUse
, "Freeing once more than Alloc'd");
303 // Prevent Circular free list (and resulting memleak & corruption)
// Debug scan: assert the block is not already on the free list.
304 for (sPoolBlock
* p1
= m_pFreeList
; p1
; p1
= p1
->pNextFree
)
305 AssertMsg1(p1
!= fp
, "Rethreading already freed 0x%x", this);
// Push the block onto the head of the free list.
310 ClientToPoolItem(p
)->pNextFree
= m_pFreeList
;
312 m_pFreeList
= ClientToPoolItem(p
);
315 ///////////////////////////////////////
317 // Dump allocated blocks
// cPoolAllocator::DumpAllocs
//
// Walk the live-allocation list and print the captured call stack of
// each outstanding block (leak report). NOTE(review): the empty-list
// test guarding the message below is elided from this view. Also note
// the format string has eight %x conversions but only Stack[0]..[6]
// (seven values) are passed — looks like a mismatched format; confirm
// against DebugMsg7's semantics before relying on the output.
320 void cPoolAllocator::DumpAllocs()
325 DebugMsg("No outstanding allocs");
329 for (sPoolBlock
* p
= m_pAllocList
; p
; p
= p
->pNextAlloc
)
331 DebugMsg7("[0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x]", p
->Stack
[0],
332 p
->Stack
[1], p
->Stack
[2],
333 p
->Stack
[3], p
->Stack
[4],
334 p
->Stack
[5], p
->Stack
[6]);
339 ///////////////////////////////////////
341 // Dump out all pools
// cPoolAllocator::DumpPools
//
// Walk the global pool list (m_pPools, threaded via m_pNextPool) and
// print each pool's statistics. NOTE(review): the tail of the
// DebugMsg5 argument list (element size, counters, and any per-pool
// DumpAllocs call) is elided past this view — confirm.
344 void cPoolAllocator::DumpPools()
347 DebugMsg("DumpPools()");
349 for (cPoolAllocator
* p
= m_pPools
; p
; p
= p
->m_pNextPool
)
351 DebugMsg5("Pool: ES=%d BF=%d Bs=%lu A=%lu F=%lu",
353 p
->m_nBlockingFactor
,
365 ///////////////////////////////////////////////////////////////////////////////