/* SB - Copyright 1982 by Ken Harrenstien, SRI International
 *	This software is quasi-public; it may be used freely with
 *	like software, but may NOT be sold or made part of licensed
 *	products without permission of the author.  In all cases
 *	the source code and any modifications thereto must remain
 *	available to any user.
 *
 *	This is part of the SB library package.
 *	Any software using the SB library must likewise be made
 *	quasi-public, with freely available sources.
 */
/*
 *	This file contains the low-level memory allocation
 *	subroutines which are used by the SBLK routines.  The code here
 *	is quite machine-dependent, and the definitions in "sb.h" should be
 *	carefully checked to verify that they are correct for the target
 *	machine.
 *
 *	The ultimate low-level routine is "sbrk()", which must be
 *	provided by the system's C library.  SBM expects that successive calls
 *	to sbrk() will return contiguous areas of memory with progressively
 *	higher addresses.  Also, the very first call to sbrk() is assumed to
 *	return a word-aligned address.
 */

#include "sb.h"
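
/* Illustrative sketch (an editor's example, not part of the original
 * source): the contiguity assumption above means SBM must be the only
 * caller of sbrk().  If anything else calls sbrk() directly, the areas
 * handed back are no longer adjacent and SBM's address arithmetic breaks.
 */
#if 0
	char *a, *b;
	a = sbrk(1024);		/* First call returns some address */
	b = sbrk(1024);		/* SBM assumes b == a + 1024, i.e. the */
				/* new area directly follows the old one */
				/* at a higher address. */
#endif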
#define FUDGE (sizeof(struct smblk))	/* Allow this much fudge in
					 * allocation, to prevent undue
					 * fragmentation */
char *(*sbm_debug)();	/* Debug switch - user-furnished routine */

struct smblk *sbm_nfl;		/* Pointer to node freelist */
struct smblk *sbm_nxtra;	/* Reserved extra free node */
struct smblk *sbm_list;		/* Pointer to smblk memory alloc list.
				 * ALL smblks are strung onto this list
				 * except for the freelist!
				 */
SBMA sbm_lowaddr;	/* Lowest word-aligned address we know about. */
/* If compiling with debug switch set, use special routine in place of
 * sbrk so we can pretend we have a very limited area of free memory.
 */
#ifdef DBG_SIZE
#define SBM_SBRK sbm_brk
char *sbm_brk();
#else
#define SBM_SBRK sbrk
extern char *sbrk();
#endif
/* Forward routine declarations */
struct smblk *sbm_nmak(), *sbm_nget(), *sbm_mget(), *sbm_split();
struct smblk *sbm_lmak(), *sbm_err();
/* SBM_INIT - Initialize storage management.
 *	If args are zero, normal initialization is done.  Otherwise,
 *	args are understood to be pointers to an area of memory allocated
 *	on the stack (eg by an "int mem[2000]" declaration in MAIN) and
 *	initialization will include this area in addition to the
 *	unused space between "_end" and the start of the stack segment.
 *	This is mostly of use for PDP11s which would otherwise waste a lot
 *	of address space.
 * Maybe should have a SBM_RESET() function?
 */
struct smblk *
sbm_init(xaddr, xlen)
SBMA xaddr;	/* Address of allocated stack area if any */
SBMO xlen;	/* Size of this area */
{	register struct smblk *sm, *sml;
	register char *cp;
	/* Get initial chunk of memory from standard system rtn */
	if((cp = SBM_SBRK(SMNODES*sizeof(struct smblk))) == 0
	  || (int)cp == -1)
		return(sbm_err(0,"Can't sbrk"));
	sm = (struct smblk *)cp;	/* Better be word-aligned! */
	sbm_lmak(sm,(SBMO)sizeof(struct smblk),SMNODES);	/* Make list */
	sbm_nfl = sm;			/* Point freelist at it */
	sbm_lowaddr = (SBMA)sm;		/* Remember lowest addr seen */
	/* Set up 1st node pointing to all memory from here on up.
	 * We don't know exactly how much will be available at this point,
	 * so we just pretend we have the maximum possible.
	 */
	sbm_list = sml = sbm_nget();
	sml->smforw = sml->smback = 0;
	sml->smflags = SM_USE|SM_NID;	/* Initial flags */
	sml->smaddr = (SBMA) sml;
	sml->smlen = MAXSBMO;		/* Pretend we have lots */
	sml->smuse = (SMNODES * sizeof(struct smblk));
	/* Now split off everything above initial allocation as NXM. */
	sm = sbm_split(sml, sm->smuse);
	sml->smflags |= SM_MNODS;	/* Mark 1st node as having SM nodes */
	sm->smflags |= SM_NXM;		/* Mark 2nd node as NXM */
	/* Now possibly set up extra nodes, if stack mem is being allocated
	 * (From our viewpoint it looks as if a chunk in the middle of
	 * the initial NXM section has been declared usable)
	 */
	if(xlen)
	  {	/* Allow for "extra" static stack memory */
		/* Will lose if xaddr <= 1st NXM! */
		sml = sbm_split(sm, (SBMO)(xaddr - sm->smaddr));
		sbm_split(sml, xlen);	/* Split off following NXM */
		sml->smflags &= ~(SM_USE|SM_NXM); /* This node is free mem! */
	  }
	/* Now set up a small additional node which points to the NXM
	 * that we cannot get from SBRK.  At this stage, this is just
	 * a place-holder, to reserve the node so we don't have to
	 * worry about running out of nodes at the same time sbrk stops
	 * winning.
	 * SM points to the NXM that we expect SBRK to dig into.
	 */
	sbm_split(sm, sm->smlen - WDSIZE);	/* Chop off teensy bit */
	sm->smflags &= ~SM_USE;		/* Now mark NXM "free"!! */
	/* Finally, reserve an "extra" SM node for use by sbm_nget
	 * when it is allocating more freelist node chunks.
	 */
	sbm_nxtra = sbm_nget();
	return(sbm_list);
}
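
/* Usage sketch (hypothetical, added for illustration): a PDP11-style
 * program might donate a stack array declared in main() so that space is
 * managed too; passing zeros gives the normal initialization.  "mem" is
 * an invented name.
 */
#if 0
	int mem[2000];				/* Scratch area on stack */
	sbm_init((SBMA)mem, (SBMO)sizeof(mem));	/* Manage it as free mem */
	/* ...or just let the first sbm_mget call do sbm_init((SBMA)0,(SBMO)0) */
#endif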
/* SBM_NGET() - Get a free SM node.
 *	Note the hair to provide a spare SM node when
 *	we are allocating memory for more SM nodes.  This is necessary
 *	because sbm_mget and sbm_nget call each other recursively and
 *	sbm_mget cannot create any new memory without a SM node to point
 *	at the allocated chunk.
 */
struct smblk *
sbm_nget()
{	register struct smblk *sm, *sml;
	if(!(sm = sbm_nfl))	/* Get a node from freelist */
	  {	/* Freelist is empty, try to allocate more nodes. */

		/* Put our "spare" smblk on freelist temporarily so that
		 * sbm_mget has a chance of winning.
		 * Infinite recursion is avoided by a test
		 * in sbm_mget which checks sbm_nfl and sbm_nxtra.
		 */
		if(!(sm = sbm_nxtra))
			return(sbm_err(0,"Zero sbm_nxtra!"));
		sm->smforw = 0;
		sbm_nfl = sm;		/* Spare is now the freelist */
		sbm_nxtra = 0;

		/* Try to allocate another chunk of SM nodes. */
		sml = sbm_nmak(sizeof(struct smblk),SM_MNODS);

		/* Put the new free nodes (if any) on freelist.
		 * Done this way because freelist may have had one or two
		 * nodes added to it by sbm_mget, so can't just stick
		 * a new pointer in sbm_nfl.
		 */
		while(sml)
		  {	sm = sml->smforw;	/* Save link, */
			sbm_nfre(sml);		/* add node to freelist, */
			sml = sm;		/* and move to next. */
		  }
		/* Now reserve an extra node again.
		 * It is an error if there is nothing on freelist here,
		 * because even if sbm_mget failed the "extra node" should
		 * still be on freelist.  The check for a zero sbm_nxtra
		 * above will catch such an error.
		 */
		sbm_nxtra = sbm_nget();

		/* Now see if anything to return */
		if(!(sm = sbm_nfl))	/* If freelist empty again, */
			return(0);	/* give up. */
	  }
	sbm_nfl = sm->smforw;	/* If win, take it off freelist */
	return(sm);		/* Return ptr or 0 if none */
}
/* SBM_NFRE(sm) - Return a SM node to the SM freelist.
 */
sbm_nfre(smp)
struct smblk *smp;
{	register struct smblk *sm;
	(sm = smp)->smflags = 0;
	sm->smforw = sbm_nfl;
	sbm_nfl = sm;
}
/* SBM_NMAK(elsize, flag) - Make (allocate & build) a typeless node freelist.
 */
struct smblk *
sbm_nmak(elsize, flag)
SBMO elsize;
int flag;
{	register struct smblk *sm, *smp;
	register int cnt;

	if((sm = sbm_mget(SMNODES*elsize,SMNODES*elsize)) == 0)
		return(0);

	sm->smflags |= flag;		/* Indicate type of nodes */
	cnt = sm->smlen/elsize;		/* Find # nodes that will fit */
	sm->smuse = cnt * elsize;	/* Actual size used */
	smp = (struct smblk *)(sm->smaddr);	/* Ptr to 1st loc of mem */
	sbm_lmak(smp, (SBMO)elsize, cnt);	/* Build freelist */
	return(smp);		/* Return 1st free node.  Caller is */
				/* responsible for setting freelist ptr. */
}
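
/* Usage sketch (hypothetical): a caller building its own freelist of
 * typeless nodes captures the returned pointer as the list head; SM_DNODS
 * serves here as an example type flag.
 */
#if 0
	struct smblk *myfl;	/* Caller's freelist header */
	if((myfl = sbm_nmak(sizeof(struct smblk), SM_DNODS)) == 0)
		/* allocation failed */ ;
#endif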
/* SBM_LMAK - Build freelist of typeless nodes.
 *	Note this does not allocate memory, it just converts an already
 *	allocated memory area.
 */
struct smblk *
sbm_lmak(addr, elsize, num)
SBMA addr;
SBMO elsize;
int num;
{	register struct smblk *sm, *smp;
	register int cnt;

	smp = (struct smblk *) addr;
	cnt = num;
	do {	sm = smp;	/* Save ptr */
		sm->smforw = (smp = (struct smblk *)((SBMA)smp + elsize));
	  } while(--cnt);
	sm->smforw = 0;		/* Last node points to nothing */
	return(sm);		/* Return ptr to last node */
}
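
/* Usage sketch (hypothetical): sbm_lmak can thread a freelist through any
 * existing area; it returns the LAST node, so the head of the new list is
 * simply the area's base address.
 */
#if 0
	struct smblk nodes[64], *myfl;	/* Statically allocated node area */
	sbm_lmak((SBMA)nodes, (SBMO)sizeof(struct smblk), 64);
	myfl = nodes;			/* 1st element is head of freelist */
#endif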
/* SBM_NMOV(sm1, sm2, begp, elsize) - Move a typeless node.
 *	Copy sm1 to sm2, adjust ptrs, leave sm1 free.
 */
sbm_nmov(smp1,smp2,begp,elsize)
struct smblk *smp1, *smp2, **begp;
int elsize;
{	register struct smblk *sm;

	bcopy((SBMA)smp1,(SBMA)(sm = smp2), elsize);	/* Copy the stuff */
	if(sm->smforw) sm->smforw->smback = sm;		/* Fix up links */
	if(sm->smback) sm->smback->smforw = sm;
	else if(begp) *begp = sm;	/* Node was head of list */
}
/* SBM_MGET(min,max) - Get a SMBLK with specified amount of memory.
 *	Returns 0 if none available.
 *	Memory is guaranteed to start on word boundary, but may not
 *	end on one.  Note that sbm_mfree is responsible for
 *	ensuring that free mem starts word-aligned.
 * A subtle but major concern of this code is the number of freelist
 * nodes gobbled by a single call.  If the freelist happens to not have
 * enough nodes, then a recursive call to sbm_mget is made (via sbm_nget)
 * in order to allocate a new batch of freelist nodes!  sbm_nget will
 * always provide a single "spare" node during such an allocation, but
 * there is only one and it is essential that sbm_mget gobble only ONE
 * (if any) during such a call, which is indicated by sbm_nxtra==0.
 * The maximum # of freelist nodes that sbm_mget can gobble is
 * 2, when (1) NXM memory is obtained, and a SM is needed to point at
 * the new free mem, plus (2) the resulting SM is too big, and has to
 * be split up, which requires another SM for the remainder.
 * The "used-NXM" smblk is set up at init time precisely in order to
 * avoid the necessity of creating it here when sbrk stops winning, since
 * that would require yet another freelist node and make it possible for
 * sbm_mget to gobble 3 during one call -- too many.
 * Further note: the sbm_nfl checks are necessary in order
 * to ensure that a SM node is available for use by sbm_split.  Otherwise
 * the calls to sbm_split might create a new SM freelist by gobbling the
 * very memory which we are hoping to return!
 */
SBMO sbm_chksiz = SMCHUNKSIZ;	/* Current chunk size to feed sbrk */

struct smblk *
sbm_mget(cmin, cmax)
SBMO cmin, cmax;
{	register struct smblk *sm, *sml;
	register SBMO csiz;
	register SBMA addr, xaddr;
	if((sm = sbm_list) == 0		/* If never done, */
	  && (sm = sbm_init((SBMA)0,(SBMO)0)) == 0)  /* initialize mem alloc stuff. */
		return(0);		/* Can't init??? */

	/* Round up sizes to word boundary */
	if(rndrem(cmin)) cmin = rndup(cmin);
	if(rndrem(cmax)) cmax = rndup(cmax);
	/* Search for a free block having enough memory.
	 * If run into a free-NXM block, always "win", since there may be
	 * a combination of preceding free-mem and new mem which will satisfy
	 * the request.  If it turns out this didn't work, we'll just fail
	 * a little farther on.
	 */
retry:	csiz = cmin;		/* Set size that will satisfy us */
	do {
		if( ((sm->smflags&SM_USE) == 0)
		  && ((sm->smlen >= csiz) || (sm->smflags&SM_NXM)) )
			break;
	  } while(sm = sm->smforw);
	if(sm == 0)
		return(0);	/* Found none that minimum would fit */
	if(sm->smflags&SM_NXM)
	  {	/* Found free area, but it's marked NXM and the system
		 * must be persuaded (via sbrk) to let us use that portion
		 * of our address space.  Grab a good-sized chunk.
		 */
		if(sbm_nfl == 0)	/* Verify a spare SM node is avail */
			goto getnod;	/* Nope, must get one. */

		/* Decide amount of mem to ask system for, via sbrk.
		 * The fine point here is the check of sbm_nxtra to make sure
		 * that, when building more freelist nodes, we don't have
		 * to use more than one SM node in the process.  If we
		 * asked for too much mem, we'd have to use a SM node
		 * to hold the excess after splitting.
		 */
		csiz = cmax;		/* Normally try for max */
		if(sbm_nxtra	/* If normal then try for big chunk */
		  && csiz < sbm_chksiz) csiz = sbm_chksiz;	/* Max */
		if(csiz > sm->smlen) csiz = sm->smlen;		/* Min */
		/* Get the NXM mem */
		if((addr = (SBMA)SBM_SBRK(csiz)) != sm->smaddr)
		  {	/* Unexpected value returned from SBRK! */

			if((int)addr != 0 && (int)addr != -1)
			  {
				/* If returned some mem which starts outside
				 * the NXM then something is screwed up. */
				if((addr < sm->smaddr)
				  || (addr >= sm->smaddr+sm->smlen))
					return(sbm_err(0,"SBRK %o != %o", addr,
						sm->smaddr));

				/* Got some mem, falls within NXM.
				 * Presumably someone else has called sbrk
				 * since last time, so we need to fence off
				 * the intervening area. */
				sm = sbm_split((sml=sm),(addr - sm->smaddr));
				sml->smflags |= SM_USE|SM_EXT;
				return(sbm_mget(cmin,cmax));
			  }

			/* If value indicates couldn't get the stuff, then
			 * we have probably hit our limit and the rest of
			 * NXM should be declared "used" to prevent further
			 * hopeless sbrk calls.  We split off the portion
			 * of NXM that is known for sure to be unavailable,
			 * and mark it "used".  If a "used NXM" area already
			 * exists following this one, the two are merged.
			 * The chunk size is then reduced by half, so
			 * only log2(SMCHUNKSIZ) attempts will be made, and
			 * the used-NXM block absorbs whatever remains.
			 */
			sbm_chksiz >>= 1;	/* Cut chunk size for next try */
			/* Handle case of SBRK claiming no more memory.
			 * Gobble as much as we can, and then turn this NXM
			 * block into a free-mem block, and leave the
			 * remainder in the used-NXM block (which should
			 * immediately follow this free-NXM block!)
			 */
			if(!(sml = sm->smforw)	/* Ensure have used-NXM blk */
			  || (sml->smflags&(SM_USE|SM_NXM))
				!= (SM_USE|SM_NXM))
				return(sbm_err(0,"No uNXM node!"));
			xaddr = sm->smaddr;	/* Use this for checking */
			sm->smuse = 0;		/* Use this for sum */
			for(csiz = sm->smlen; csiz > 0;)
			  {	addr = SBM_SBRK(csiz);
				if((int)addr == 0 || (int)addr == -1)
				  {	csiz >>= 1;	/* Lost; halve request */
					continue;
				  }
				if(addr != xaddr)	/* Must stay contiguous */
					return(sbm_err(0,"SBRK %o != %o", addr,
						xaddr));
				sm->smuse += csiz;	/* Won, add to total */
				xaddr += csiz;
				csiz = sm->smlen - sm->smuse;	/* Ask for rest */
			  }

			/* Have gobbled as much from SBRK as we could.
			 * Turn the free-NXM block into a free-mem block,
			 * unless we got nothing, in which case just merge
			 * it into the used-NXM block and continue
			 * searching from this point.
			 */
			if(!(csiz = sm->smuse))		/* Get total added */
			  {	sm->smflags = sml->smflags;	/* Ugh. */
				sbm_mmrg(sm);	/* Merge w/used-NXM blk */
				goto retry;	/* Keep looking */
			  }
			else
			  {	sml->smaddr = sm->smaddr + csiz;
				sml->smlen += sm->smlen - csiz;
				sm->smlen = csiz;
				sm->smflags &= ~SM_NXM;	/* No longer NXM */
			  }
		  }
		/* Here when we've acquired CSIZ more memory from sbrk.
		 * If preceding mem area is not in use, merge new mem
		 * into it.
		 */
		if((sml = sm->smback) &&
		  (sml->smflags&(SM_USE|SM_NXM))==0)	/* Previous free? */
		  {	sml->smlen += csiz;		/* Yes, simple! */
			sm->smaddr += csiz;		/* Fix up */
			if((sm->smlen -= csiz) == 0)	/* If no NXM left,*/
				sbm_mmrg(sml);	/* Merge NXM node w/prev */
			sm = sml;		/* Prev is now winning node */
		  }
		else
		  {	/* Prev node isn't a free area.  Split up the NXM
			 * node to account for acquired mem, unless we
			 * gobbled all the mem available.
			 */
			if(sm->smlen > csiz		/* Split unless all used */
			  && !sbm_split(sm,csiz))	/* Call shd always win */
				return(sbm_err(0,"getsplit err: %o",sm));
			sm->smflags &= ~SM_NXM;	/* Node is now real mem */
		  }
	  }
	/* Now make a final check that we have enough memory.
	 * This can fail because SBRK may not have been able
	 * to gobble enough memory, either because (1) not
	 * as much NXM was available as we thought,
	 * or (2) we noticed the free-NXM area and immediately
	 * gambled on trying it without checking any lengths.
	 * In any case, we try again starting from the current SM
	 * because there may be more free mem higher up (eg on
	 * the stack).
	 */
	if(sm->smlen < cmin)
		goto retry;
	/* Check to see if node has too much mem.  This is especially true
	 * for memory just acquired via sbrk, which gobbles a huge chunk each
	 * time.  If there's too much, we split up the area.
	 */
	if(sm->smlen > cmax+FUDGE)	/* Got too much? (Allow some fudge)*/
	  {	/* Yes, split up so don't gobble too much. */
		if(sbm_nfl)			/* If success guaranteed, */
			sbm_split(sm,cmax);	/* split it, all's well. */
		else goto getnod;		/* Else must get a node. */
	  }
	sm->smflags |= SM_USE;	/* Finally seize it by marking "in-use". */
	return(sm);
	/* Come here when we will need to get another SM node but the
	 * SM freelist is empty.  We have to forget about using the area
	 * we just found, because sbm_nget may gobble it for the
	 * freelist.  So, we first force a refill of the freelist, and then
	 * invoke ourselves again on what's left.
	 */
getnod:	if(sml = sbm_nget())	/* Try to build freelist */
	  {	sbm_nfre(sml);	/* Won, give node back, */
		sm = sbm_list;	/* and retry, starting over! */
		goto retry;
	  }

	/* Failed.  Not enough memory for both this request
	 * and one more block of SM nodes.  Since such a SM_MNODS
	 * block isn't very big, we are so close to the limits that it
	 * isn't worth trying to do something fancy here to satisfy the
	 * original request.  So we just fail.
	 */
	return(0);
}
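
/* Usage sketch (hypothetical): callers give a (min,max) range when extra
 * space is usable; smlen reports how much actually came back.
 */
#if 0
	register struct smblk *sm;
	if(sm = sbm_mget((SBMO)512, (SBMO)4096))  /* Need 512, can use 4K */
	  {	/* sm->smaddr is the base address, sm->smlen the true size */
	  }
#endif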
/* Code for debugging stuff by imposing an artificial limitation on size
 * of available memory.
 */
#ifdef DBG_SIZE
SBMO sbm_dlim = MAXSBMO;  /* Amount of mem to allow (default is max) */

char *
sbm_brk(size)
unsigned size;
{	register char *addr;

	if(size > sbm_dlim) return(0);
	addr = sbrk(size);
	if((int)addr == 0 || (int)addr == -1)
		return(0);
	sbm_dlim -= size;
	return(addr);
}
#endif
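
/* Testing sketch (hypothetical, assuming the debug-switch conditional
 * above is compiled in): pinching sbm_dlim down simulates running out
 * of memory.
 */
#if 0
	sbm_dlim = 8192;	/* Pretend only 8K more sbrk mem exists */
	/* ...allocations start returning 0 once 8K has been gobbled */
#endif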
/* SBM_MFREE(sm) - Free up an allocated memory area.
 */
sbm_mfree(sm)
register struct smblk *sm;
{	register struct smblk *smx;
	register SBMO crem;

	sm->smflags &= ~SM_USE;			/* Say mem is free */

	if((smx = sm->smback)			/* Check preceding mem */
	  && (smx->smflags&(SM_USE|SM_NXM))==0)	/* If it's free, */
		sbm_mmrg(sm = smx);		/* then merge 'em. */
	if((smx = sm->smforw)			/* Check following mem */
	  && (smx->smflags&(SM_USE|SM_NXM))==0)	/* Again, if free, */
		sbm_mmrg(sm);			/* merge them. */

	if(sm->smlen == 0)		/* Just in case, chk for null blk */
	  {	if(smx = sm->smback)	/* If pred exists, */
			sbm_mmrg(smx);	/* merge quietly. */
		else
		  {	sbm_list = sm->smforw;	/* 1st node on list, so */
			sbm_nfre(sm);		/* simply flush it. */
		  }
		return;
	  }
	/* This code is slightly over-general for some machines.
	 * The pointer subtraction is done in order to get a valid integer
	 * offset value regardless of the internal representation of a pointer.
	 * We cannot reliably force alignment via casts; some C implementations
	 * treat that as a no-op.
	 */
	if(crem = rndrem(sm->smaddr - sbm_lowaddr))	/* On word bndry? */
	  {	/* No -- must adjust.  All free mem blks MUST, by fiat,
		 * start on word boundary.  Here we fix things by
		 * making the leftover bytes belong to the previous blk,
		 * no matter what it is used for.  Prev blk is guaranteed to
		 * (1) Exist (this cannot be 1st blk since 1st is known to
		 * start on wd boundary) and to be (2) Non-free (else it would
		 * already have been merged).
		 */
		if((smx = sm->smback) == 0)	/* Get ptr to prev blk */
		  {	sbm_err(0,"Align err");	/* Catch screws */
			return;
		  }
		crem = WDSIZE - crem;	/* Find # bytes to flush */
		if(crem >= sm->smlen)	/* Make sure node has that many */
		  {	sbm_mmrg(smx);	/* Flush node to avoid zero length */
			return;
		  }
		smx->smlen += crem;	/* Make stray bytes part of prev */
		sm->smaddr += crem;	/* And flush from current. */
		sm->smlen -= crem;
	  }
}
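
/* Usage sketch (hypothetical): sbm_mget/sbm_mfree bracket an area's
 * lifetime; freeing automatically merges with free neighbors.
 */
#if 0
	register struct smblk *sm;
	if(sm = sbm_mget((SBMO)1024, (SBMO)1024))
	  {	/* ...use the sm->smlen bytes at sm->smaddr... */
		sbm_mfree(sm);		/* Done; sm is no longer valid */
	  }
#endif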
/* SBM_EXP - Expand (or shrink) size of an allocated memory chunk.
 *	"nsize" is desired new size; may be larger or smaller than current
 *	size.
 */
struct smblk *
sbm_exp(sm, size)
register struct smblk *sm;
SBMO size;	/* nsize */
{	register struct smblk *smf;
	register SBMO mexp, pred, succ;

	if(sm->smlen >= size)		/* Do we want truncation? */
		goto realo2;		/* Yup, go split block */

	/* Block is expanding. */
	mexp = size - sm->smlen;	/* Get # bytes to expand by */
	pred = succ = 0;
	if((smf = sm->smforw)		/* See if free mem follows */
	  && (smf->smflags&(SM_USE|SM_NXM)) == 0)
		if((succ = smf->smlen) >= mexp)
			goto realo1;	/* Quick stuff if succ OK */

	if((smf = sm->smback)		/* See if free mem precedes */
	  && (smf->smflags&(SM_USE|SM_NXM)) == 0)
		pred = smf->smlen;

	/* If not enough free space combined on both sides of this chunk,
	 * we have to look for a completely new block.
	 */
	if(pred + succ + sm->smlen < size)
	  {	if((smf = sbm_mget(size,size)) == 0)
			return(0);	/* Couldn't find one */
		else pred = 0;		/* Won, indicate new block */
	  }

	/* OK, must copy either into new block or down into predecessor
	 * (overlap is OK as long as bcopy moves 1st byte first)
	 */
	bcopy(sm->smaddr, smf->smaddr, sm->smlen);
	smf->smflags = sm->smflags;	/* Copy extra attribs */
	smf->smuse = sm->smuse;
	if(!pred)		/* If invoked sbm_mget */
	  {	sbm_mfree(sm);	/* then must free up old area */
		return(smf);	/* and can return immediately. */
	  }
	sbm_mmrg(smf);		/* Merge current into pred blk */
	sm = smf;		/* Now pred is current blk. */

	if(succ)
realo1:		sbm_mmrg(sm);	/* Merge succ into current blk */
realo2:	if(sm->smlen > size		/* If now have too much, */
	  && sbm_split(sm, size))	/* split up and possibly */
		sbm_mfree(sm->smforw);	/* free up unused space. */

	/* Note that sbm_split can fail if it can't get a free node,
	 * which is only possible if we are reducing the size of an area.
	 * If it fails, we just return anyway without truncating the area.
	 */
	return(sm);
}
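
/* Usage sketch (hypothetical): sbm_exp may hand back a DIFFERENT smblk
 * (the data is copied), so always adopt the returned pointer.
 */
#if 0
	struct smblk *nsm;
	if(nsm = sbm_exp(sm, (SBMO)2048))	/* Grow (or shrink) to 2K */
		sm = nsm;			/* Old ptr may be stale */
#endif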
/* SBM_MMRG(sm) - Merge a memory area with the area following it.
 *	The node (and memory area) following the SM pointed to are
 *	merged in and the successor node freed up.  The flags
 *	and smuse of the current SM (which is not moved or anything)
 *	remain unchanged.
 */
sbm_mmrg(smp)
struct smblk *smp;
{	register struct smblk *sm, *sm2;

	sm = smp;
	sm->smlen += (sm2 = sm->smforw)->smlen;	/* Add succ's len */
	if(sm->smforw = sm2->smforw)		/* and fix linkages */
		sm->smforw->smback = sm;
	sbm_nfre(sm2);		/* now can flush succ node */
}
/* SBM_SPLIT - Split up an area (gets a new smblk to point to split-off
 *	portion).
 *	Note returned value is ptr to 2nd smblk, since this is a new one.
 *	Ptr to 1st remains valid since original smblk stays where it is.
 * NOTE: Beware of splitting up free mem (SM_USE == 0) since sbm_nget may
 *	steal it out from under unless precautions are taken!  See comments
 *	at sbm_mget related to this.
 */
struct smblk *
sbm_split(smp, coff)
struct smblk *smp;
SBMO coff;
{	register struct smblk *sm, *smx;
	register SBMO csiz;

	if((sm = smp)->smlen <= (csiz = coff))
		return(0);
	if((smx = sbm_nget()) == 0)
		return(0);
	smx->smlen = sm->smlen - csiz;		/* Set 2nd size */
	smx->smaddr = sm->smaddr + csiz;	/* Set 2nd addr */
	sm->smlen = csiz;			/* Take from 1st size */
	smx->smflags = sm->smflags;		/* Copy flags */
	if(smx->smforw = sm->smforw)		/* Splice 2nd after 1st */
		smx->smforw->smback = smx;
	smx->smback = sm;
	sm->smforw = smx;		/* Put 2nd into chain */
	return(smx);			/* Return ptr to 2nd smblk */
}
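
/* Usage sketch (hypothetical): splitting an in-use area at a byte offset
 * yields a second smblk for the tail; the original node keeps the head.
 */
#if 0
	struct smblk *tail;
	if(tail = sbm_split(sm, (SBMO)100))	/* sm keeps 1st 100 bytes */
		sbm_mfree(tail);		/* e.g. give back the rest */
#endif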
#if 0	/* Replaced by "bcopy" for system-dep efficiency */
/* SBM_SCPY - Copy string of bytes.  Somewhat machine-dependent;
 *	Tries to be clever about using word moves instead of byte moves.
 */
sbm_scpy(from, to, count)	/* Copy count bytes from -> to */
char *from, *to;
unsigned count;
{	register char *s1, *s2;
	register unsigned cnt;
	int tmp;

	if((cnt = count) == 0)
		return;
	s1 = from;
	s2 = to;
	while(rndrem((int)s1))		/* Get 1st ptr aligned */
	  {	*s2++ = *s1++;
		if(--cnt == 0) return;
	  }
	if(rndrem((int)s2) == 0)	/* Do wd move if ptr 2 now aligned */
	  {
#ifdef DUMBPCC	/* Code for dumber (Portable C type) compiler */
		register WORD *ap, *bp;

		ap = (WORD *)s1;
		bp = (WORD *)s2;
		tmp = cnt;
		if(cnt = rnddiv(cnt))
			do { *bp++ = *ap++; }
			while(--cnt);
		if((cnt = rndrem(tmp)) == 0)
			return;
		s1 = (char *)ap;
		s2 = (char *)bp;
#else
		/* Tight loop for efficient copying on 11s */
		tmp = cnt;
		if(cnt = rnddiv(cnt))
			do { *((WORD *)s2)++ = *((WORD *)s1)++; }
			while(--cnt);
		if((cnt = rndrem(tmp)) == 0)
			return;
#endif
	  }
	do { *s2++ = *s1++; }	/* Finish up with byte loop */
	while(--cnt);
}
#endif
struct smblk *		/* If it returns at all, this is most common type */
sbm_err(val, str, a0, a1, a2, a3)
struct smblk *val;
char *str;
{	register int *sptr;

	sptr = (int *) &sptr;	/* Point to self on stack */
	sptr += 5;		/* Point to return addr */
	if((int)sbm_debug==1)	/* Special value: die at once */
		abort();
	if(sbm_debug)
		(*sbm_debug)(0,*sptr,str,a0,a1,a2,a3);
	return(val);
}
/* These routines correspond to the V7 LIBC routines as described
 * in the V7 UPM (3).  They should provide satisfactory emulation
 * if the documentation is correct.  Replacement is necessary since
 * the SBM routines are jealous and cannot tolerate competition for
 * calls of SBRK; i.e. the memory being managed must be contiguous.
 */

/* MALLOC(size) - Allocate a memory area.
 *	Guaranteed to return word-aligned pointer to area of AT LEAST
 *	requested size.  Area size is rounded up to word boundary.
 */
char *
malloc(size)
unsigned size;
{	register struct smblk *sm, **sma;
	register SBMO siz;

	siz = rndup(size + sizeof (struct smblk *));	/* Make room for ptr */
	if((sm = sbm_mget(siz,siz)) == 0)
		return(0);
	*(sma = (struct smblk **)sm->smaddr) = sm;	/* Store ptr in addr-1 */
	return((char *)++sma);
}
char *
alloc(size)	/* For V6 programs - note different failure value! */
unsigned size;
{	register char *addr;
	return((addr = malloc(size)) ? addr : (char *) -1);
}
/* FREE(ptr) - Free a malloc'd memory area.
 */
free(ptr)
char *ptr;
{	register struct smblk *sm, **smp;

	smp = &((struct smblk **)ptr)[-1];	/* Point to addr-1 */
	sm = *smp;				/* Pluck SM ptr therefrom */
	if(((sm->smflags&0377) != SM_NID) || sm->smaddr != (SBMA)smp)
		return((int)sbm_err(0,"free: bad arg %o", ptr));
	sbm_mfree(sm);
	return(1);
}
/* REALLOC(ptr,size) - Change size of a malloc'd memory area.
 */
char *
realloc(ptr, size)
char *ptr;
unsigned size;
{	register struct smblk *sm, **smp;

	smp = &((struct smblk **)ptr)[-1];	/* Point to addr-1 */
	sm = *smp;				/* Pluck SM ptr therefrom */
	if(((sm->smflags&0377) != SM_NID) || (sm->smaddr != (SBMA)smp))
		return((char *)sbm_err(0,"realloc: bad arg %o",ptr));
	if((sm = sbm_exp(sm, rndup(size+(sizeof(struct smblk *))))) == 0)
		return(0);
	*(smp = (struct smblk **)sm->smaddr) = sm;	/* Save smblk ptr */
	return((char *)++smp);
}
/* CALLOC(nelem,elsize) - Allocate cleared memory for an array.
 */
char *
calloc(nelem, elsize)
unsigned nelem, elsize;
{	register SBMO cmin;
	register WORD *ip;	/* Clear in units of words */
	register char *addr;

	if((cmin = nelem*elsize) == 0	/* Find # bytes to get */
	  || (addr = malloc(cmin)) == 0)	/* Get it */
		return(0);
	ip = (WORD *) addr;		/* Set up ptr to area */
	cmin = rnddiv(cmin+WDSIZE-1);	/* Find # words to clear */
	do { *ip++ = 0; } while (--cmin);	/* Zap the area */
	return(addr);
}
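
/* Usage sketch (hypothetical): the emulated entry points behave like
 * their V7 namesakes; each area carries a hidden smblk pointer in the
 * word just below the address handed out.
 */
#if 0
	char *p, *q;
	if(p = malloc(100))
	  {	if(q = realloc(p, 200))	/* May move; adopt new value */
			p = q;
		free(p);
	  }
#endif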
/* SBM_NGC() - Specific routine for GC'ing SMBLK nodes.
 *
 * SBM_XNGC(begp, elsize, type) - Compact nodes of specified type.
 *	Scans allocated mem from low to high to find chunks with nodes of
 *	the specified type.
 *	Flushes current freelist and rebuilds it as scan progresses,
 *	such that 1st thing on list is lowest-addr node.  When a node is
 *	seen that can be moved, new node is acquired from freelist if
 *	it exists, otherwise no move is made.  If a chunk has been scanned
 *	and no active nodes remain, it is flushed and freelist updated.
 * NOTE: This has not yet been verified to work with nodes of any
 *	type other than SMBLK.
 */
sbm_ngc()
{	register struct smblk *sm;

	if(!(sm = sbm_nxtra))
		return((int)sbm_err(0,"Zero sbm_nxtra"));
	sm->smflags |= SM_USE;	/* Ensure this one isn't GC'd */
	sbm_xngc(&sbm_nfl, sizeof(struct smblk), SM_MNODS);
	sm->smflags = 0;	/* Flush temporary crock */
}
sbm_xngc(begp, elsize, flag)
struct smblk **begp;
unsigned elsize, flag;
{	register struct smblk *sm, *chk, *smf;
	struct smblk *ftail, *savtail;
	register int cnt;
	int inuse;

	*begp = ftail = 0;		/* Flush node freelist */
	for(chk = sbm_list; chk; chk = chk->smforw)
	    if(chk->smflags&flag)
	      {	sm = (struct smblk *) chk->smaddr;
		cnt = (chk->smuse)/elsize;
		inuse = 0;
		savtail = ftail;
		smf = *begp;	/* Set up ptr to 1st freelist node */
		for(; cnt > 0; --cnt)
		  {	/* Here decide if movable */
			if(sm->smflags && smf	/* Live and have copy place */
			  && ( (sm->smflags&SM_USE) == 0	/* Free mem? */
			    || (sm->smflags&(SM_MNODS|SM_DNODS)) )
			  && sm->smback)	/* has backptr (see ncpy) */
			  {	/* Move the node */
				*begp = smf->smforw;	/* Get free node */

				/* Move node.  Already checked for back ptr
				 * of 0 since no obvious way to tell where
				 * the ptr to list is kept.  Sigh.
				 */
				sbm_nmov(sm,smf,(struct smblk **)0,elsize);

				/* Get ptr to new freelist node.  Note
				 * check to ensure that it is not in this
				 * same chunk (if it is, no point in moving
				 * more nodes!)
				 */
				if((smf = *begp) >= chk)
					smf = 0;	/* Zero if same chk */
				sm->smflags = 0;	/* Make node free */
			  }

			/* At this point, not movable */
			if(sm->smflags == 0)	/* Free node? */
			  {	if(ftail)	/* Add to freelist */
					ftail->smforw = sm;
				else *begp = sm;	/* 1st on list */
				ftail = sm;
				sm->smforw = 0;
			  }
			else inuse++;
			sm = (struct smblk *)((SBMA)sm + elsize);
		  }
		if(inuse == 0		/* All free? */
		  && (sm = chk->smback))	/* & not 1st? */
		  {	if(savtail)		/* Edit freelist */
				(ftail = savtail)->smforw = 0;
			else *begp = ftail = 0;
			sbm_mfree(chk);	/* Give back the chunk's mem */
			chk = sm;	/* Resume scan at predecessor */
		  }
	      }
}

/* SBM_NFOR(flag, nodsiz, proc, arg) - Apply a function to all nodes
 *	of the specified type.
 *	Note that proc must return a zero value, or loop aborts and
 *	returns that selfsame value.
 */
sbm_nfor(flag, nodsiz, proc, arg)
int flag, nodsiz;
int (*proc)();
int arg;
{	register struct smblk *sm, *np;
	register int cnt;
	int res;

	for(sm = sbm_list; sm; sm = sm->smforw)
	    if(sm->smflags&flag)
	      {	np = (struct smblk *) sm->smaddr;
		cnt = sm->smuse/nodsiz;
		do {	if(np->smflags)		/* Live node? */
				if(res = (*proc)(np,arg))
					return(res);
			np = (struct smblk *)((SBMA)np + nodsiz);
		  } while(--cnt);
	      }
	return(0);
}