1 /***********************************************************************
3 * This software is part of the ast package *
4 * Copyright (c) 1985-2010 AT&T Intellectual Property *
5 * and is licensed under the *
6 * Common Public License, Version 1.0 *
7 * by AT&T Intellectual Property *
9 * A copy of the License is available at *
10 * http://www.opensource.org/licenses/cpl1.0.txt *
11 * (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9) *
13 * Information and Software Systems Research *
17 * Glenn Fowler <gsf@research.att.com> *
18 * David Korn <dgk@research.att.com> *
19 * Phong Vo <kpv@research.att.com> *
21 ***********************************************************************/
/* NOTE(review): on UWIN builds of the ast library this file appears to be
** reduced to an empty stub function; the matching #else/#endif of this
** guard (and the #include of the real implementation's header) are among
** the lines missing from this garbled copy — confirm against a pristine
** vmprivate.c. */
22 #if defined(_UWIN) && defined(_BLD_ast)
24 void _STUB_vmprivate(){}
/* Identification string embedded in the binary (SCCS/what(1)-style);
** it is also referenced in vmextend() below to keep compilers from
** warning that it is unused.  (Repaired: the extraction had fused the
** original line number "30" into the declaration and split the line.) */
static char* Version = "\n@(#)$Id: Vmalloc (AT&T Research) 2010-01-01 $\0\n";
/* Private code used in the vmalloc library
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
37 /* Get more memory for a region */
/* NOTE(review): extraction-garbled copy of vmextend() ("get more memory
** for a region").  Statements are split across lines, the original file's
** line numbers are fused into the text (the leading "39 ", "41 ", ...),
** and many source lines are missing (the fused numbers jump, e.g. 44->50,
** 88->92, 145->154).  The fragments are annotated as-is below; restore
** this function from a pristine vmprivate.c rather than hand-patching.
*/
/* ANSI prototype (originally guarded by #if __STD_C — guard lines missing) */
39 static Block_t
* vmextend(reg Vmalloc_t
* vm
, size_t size
, Vmsearch_f searchf
)
/* K&R prototype (originally the #else branch of the same guard) */
41 static Block_t
* vmextend(vm
, size
, searchf
)
42 reg Vmalloc_t
* vm
; /* region to increase in size */
43 size_t size
; /* desired amount of space */
44 Vmsearch_f searchf
; /* tree search function */
/* locals: addr is initialized from Version only to silence an
** unused-variable warning; vd/memoryf/exceptf cache the region data and
** the discipline's memory/exception functions */
50 reg Vmuchar_t
* addr
= (Vmuchar_t
*)Version
; /* shut compiler warning */
51 reg Vmdata_t
* vd
= vm
->data
;
52 reg Vmemory_f memoryf
= vm
->disc
->memoryf
;
53 reg Vmexcept_f exceptf
= vm
->disc
->exceptf
;
/* make sure the system page size has been queried */
55 GETPAGESIZE(_Vmpagesize
);
57 #if DEBUG /* trace all allocation calls through the heap */
58 if(!_Vmtrace
&& vm
== Vmheap
&& (vd
->mode
&VM_TRUST
) )
/* default the extension increment on the heap's first call */
62 if(vd
->incr
<= 0) /* this is just _Vmheap on the first call */
63 vd
->incr
= VMHEAPINCR
;
65 /* Get slightly more for administrative data */
66 s
= size
+ sizeof(Seg_t
) + sizeof(Block_t
) + sizeof(Head_t
) + 2*ALIGN
;
/* overflow guards: fail if the padded or increment-rounded size wrapped */
67 if(s
<= size
) /* size was too large and we have wrapped around */
69 if((size
= ROUND(s
,vd
->incr
)) < s
)
72 /* increase the rounding factor to reduce # of future extensions */
73 if(size
> 2*vd
->incr
&& vm
->disc
->round
< vd
->incr
)
76 /* see if we can extend the current segment */
78 addr
= NIL(Vmuchar_t
*);
80 { if(!vd
->wild
|| SEG(vd
->wild
) != seg
)
83 { s
= SIZE(vd
->wild
) + sizeof(Head_t
);
84 if((s
= (s
/vd
->incr
)*vd
->incr
) == size
)
/* ask the discipline memory function to grow the segment in place */
87 addr
= (Vmuchar_t
*)(*memoryf
)(vm
,seg
->addr
,seg
->extent
,
88 seg
->extent
+size
-s
,vm
->disc
);
92 { /**/ASSERT(addr
== (Vmuchar_t
*)seg
->addr
);
/* in-place extension unavailable: request fresh space, consulting the
** exception handler on VM_NOMEM before giving up */
98 while(!addr
) /* try to get space */
99 { if((addr
= (Vmuchar_t
*)(*memoryf
)(vm
,NIL(Void_t
*),0,size
,vm
->disc
)) )
102 /* check with exception handler to see if we should continue */
104 return NIL(Block_t
*);
/* drop the region lock around the exception callback (restoration of the
** lock is among the missing lines — verify against the pristine source) */
107 lock
= vd
->mode
&VM_LOCK
;
108 vd
->mode
&= ~VM_LOCK
;
109 rv
= (*exceptf
)(vm
,VM_NOMEM
,(Void_t
*)size
,vm
->disc
);
113 vd
->mode
|= VM_AGAIN
;
114 return NIL(Block_t
*);
120 { /* extending current segment */
121 bp
= BLOCK(seg
->baddr
);
123 if(vd
->mode
&(VM_MTBEST
|VM_MTDEBUG
|VM_MTPROFILE
) )
124 { /**/ ASSERT((SIZE(bp
)&~BITS
) == 0);
125 /**/ ASSERT(SEG(bp
) == seg
);
126 if(!ISPFREE(SIZE(bp
)) )
127 SIZE(bp
) = size
- sizeof(Head_t
);
129 { /**/ ASSERT(searchf
);
/* detach the old wild block, or REMOVE bp from its free tree, before
** folding it into the newly obtained space */
132 vd
->wild
= NIL(Block_t
*);
133 else REMOVE(vd
,bp
,INDEX(SIZE(bp
)),t
,(*searchf
));
140 seg
->free
= NIL(Block_t
*);
145 SIZE(bp
) = size
- sizeof(Head_t
);
154 { /* creating a new segment */
155 reg Seg_t
*sp
, *lastsp
;
/* align the segment start; trim both ends when addr was misaligned */
157 if((s
= (size_t)(VLONG(addr
)%ALIGN
)) != 0)
162 seg
->addr
= (Void_t
*)(addr
- (s
? ALIGN
-s
: 0));
164 seg
->baddr
= addr
+ size
- (s
? 2*ALIGN
: 0);
165 seg
->free
= NIL(Block_t
*);
168 SIZE(bp
) = seg
->baddr
- (Vmuchar_t
*)bp
- 2*sizeof(Head_t
);
170 /* NOTE: for Vmbest, Vmdebug and Vmprofile the region's segment list
171 is reversely ordered by addresses. This is so that we can easily
172 check for the wild block.
*/
/* find the insertion point for the new segment in the segment list
** (the actual insertion lines are missing from this copy) */
174 lastsp
= NIL(Seg_t
*);
176 if(vd
->mode
&(VM_MTBEST
|VM_MTDEBUG
|VM_MTPROFILE
))
177 for(; sp
; lastsp
= sp
, sp
= sp
->next
)
178 if(seg
->addr
> sp
->addr
)
185 seg
->size
= SIZE(bp
);
188 /* make a fake header for possible segmented memory */
193 /* see if the wild block is still wild */
194 if((t
= vd
->wild
) && (seg
= SEG(t
)) != vd
->seg
)
/* the old wild block is no longer last in the region: mark it BUSY|JUNK
** and push it onto the size-indexed cache list so it is reclaimed later */
195 { CLRPFREE(SIZE(NEXT(t
)));
196 if(vd
->mode
&(VM_MTBEST
|VM_MTDEBUG
|VM_MTPROFILE
) )
197 { SIZE(t
) |= BUSY
|JUNK
;
198 LINK(t
) = CACHE(vd
)[C_INDEX(SIZE(t
))];
199 CACHE(vd
)[C_INDEX(SIZE(t
))] = t
;
203 vd
->wild
= NIL(Block_t
*);
209 /* Truncate a segment if possible */
/* NOTE(review): extraction-garbled copy of vmtruncate() ("truncate a
** segment if possible").  As with vmextend() above, statements are split
** across lines, original line numbers are fused into the text, and many
** lines are missing (the fused numbers jump, e.g. 216->222, 223->232,
** 238->242).  Restore from a pristine vmprivate.c rather than patching.
*/
/* ANSI prototype (originally guarded by #if __STD_C — guard lines missing) */
211 static ssize_t
vmtruncate(Vmalloc_t
* vm
, Seg_t
* seg
, size_t size
, int exact
)
/* K&R prototype (originally the #else branch; the "int exact;" parameter
** declaration is presumably among the missing lines) */
213 static ssize_t
vmtruncate(vm
, seg
, size
, exact
)
214 Vmalloc_t
* vm
; /* containing region */
215 Seg_t
* seg
; /* the one to be truncated */
216 size_t size
; /* amount of free space */
/* cached region data and discipline memory function */
222 reg Vmdata_t
* vd
= vm
->data
;
223 reg Vmemory_f memoryf
= vm
->disc
->memoryf
;
/* compute "less" — how much to give back — honoring the discipline's
** rounding quantum and the region increment, and keeping at least a
** Block_t's worth of slack at the segment end */
232 else /* keep truncated amount to discipline requirements */
233 { if((less
= vm
->disc
->round
) <= 0)
235 less
= (size
/less
)*less
;
236 less
= (less
/vd
->incr
)*vd
->incr
;
237 if(less
> 0 && size
> (size_t)less
&& (size
-(size_t)less
) < sizeof(Block_t
) )
238 less
= (size_t)less
<= vd
->incr
? 0 : (size_t)less
- vd
->incr
;
/* shrink the segment in place via the discipline memory function;
** condition fails when it does not return the same base address */
242 (*memoryf
)(vm
,caddr
,seg
->extent
,seg
->extent
-less
,vm
->disc
) != caddr
)
/* rebuild the BUSY sentinel block at the new segment end */
248 SEG(BLOCK(seg
->baddr
)) = seg
;
249 SIZE(BLOCK(seg
->baddr
)) = BUSY
;
254 { /* unlink segment from region */
256 { vd
->seg
= seg
->next
;
260 { for(last
= vd
->seg
; last
->next
!= seg
; last
= last
->next
)
262 last
->next
= seg
->next
;
/* release the whole segment back to the discipline (new extent 0) */
266 if((*memoryf
)(vm
,caddr
,seg
->extent
,0,vm
->disc
) == caddr
)
269 /* space reduction failed, reinsert segment */
271 { seg
->next
= last
->next
;
275 { seg
->next
= vd
->seg
;
282 /* Externally visible names but local to library */
283 Vmextern_t _Vmextern
=
284 { vmextend
, /* _Vmextend */
285 vmtruncate
, /* _Vmtruncate */
287 NIL(char*(*)_ARG_((char*,const char*,int))), /* _Vmstrcpy */
288 NIL(char*(*)_ARG_((Vmulong_t
,int))), /* _Vmitoa */
289 NIL(void(*)_ARG_((Vmalloc_t
*,
290 Vmuchar_t
*,Vmuchar_t
*,size_t,size_t))), /* _Vmtrace */
291 NIL(void(*)_ARG_((Vmalloc_t
*))) /* _Vmpfclose */