/* SB - Copyright 1982 by Ken Harrenstien, SRI International
 *	This software is quasi-public; it may be used freely with
 *	like software, but may NOT be sold or made part of licensed
 *	products without permission of the author.  In all cases
 *	the source code and any modifications thereto must remain
 *	available to any user.
 *
 *	This is part of the SB library package.
 *	Any software using the SB library must likewise be made
 *	quasi-public, with freely available sources.
 */
/*
 *	This file contains the low-level memory allocation
 * subroutines which are used by the SBLK routines.  The code here
 * is quite machine-dependent, and the definitions in "sb.h" should be
 * carefully checked to verify that they are correct for the target
 * machine.
 *
 *	The ultimate low-level routine is "sbrk()" which must be
 * provided by the system's C library.  SBM expects that successive calls
 * to sbrk() will return contiguous areas of memory with progressively
 * higher addresses.  Also, the very first call to sbrk() is assumed to
 * return a word-aligned address.
 */
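/* For example, the code below assumes sbrk behavior like the following
 * (an illustrative fragment only, not part of this library):
 *
 *	char *a = sbrk(64);		(very 1st call: word-aligned)
 *	char *b = sbrk(64);		(later calls: contiguous, higher)
 *	if(b != a + 64) abort();	(SBM cannot cope if this fails)
 */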
#define FUDGE (sizeof(struct smblk))	/* Allow this much fudge in
					 * allocation, to prevent undue
					 * fragmentation */
char *(*sbm_debug)();		/* Debug switch - user-furnished routine */

struct smblk *sbm_nfl;		/* Pointer to node freelist */
struct smblk *sbm_nxtra;	/* Reserved extra free node */
struct smblk *sbm_list;		/* Pointer to smblk memory alloc list.
				 * ALL smblks are strung onto this list
				 * except for the freelist!
				 */
SBMA sbm_lowaddr;		/* Lowest word-aligned address we know about. */
/* If compiling with debug switch set, use special routine in place of
 * sbrk so we can pretend we have a very limited area of free memory.
 */
#define SBM_SBRK sbm_brk
/* Forward routine declarations */
struct smblk *sbm_nmak(), *sbm_nget(), *sbm_mget(), *sbm_split();
struct smblk *sbm_lmak(), *sbm_err();
/* SBM_INIT - Initialize storage management.
 * If args are zero, normal initialization is done.  Otherwise,
 * args are understood to be pointers to an area of memory allocated
 * on the stack (eg by an "int mem[2000]" declaration in MAIN) and
 * initialization will include this area in addition to the
 * unused space between "_end" and the start of the stack segment.
 * This is mostly of use for PDP11s which would otherwise waste a lot
 * of memory.
 * Maybe should have a SBM_RESET() function?
 */
struct smblk *
sbm_init(xaddr, xlen)
SBMA xaddr;		/* Address of allocated stack area if any */
SBMO xlen;		/* Size of this area */
{	register struct smblk *sm, *sml;
	register char *cp;
	/* Get initial chunk of memory from standard system rtn */
	if((cp = SBM_SBRK(SMNODES*sizeof(struct smblk))) == 0
	  || (int)cp == -1)
		return(sbm_err(0,"Can't sbrk"));
	sm = (struct smblk *)cp;	/* Better be word-aligned! */
	sbm_lmak(sm, (SBMO)sizeof(struct smblk), SMNODES);	/* Make list */
	sbm_nfl = sm;			/* Point freelist at it */
	sbm_lowaddr = (SBMA)sm;		/* Remember lowest addr seen */
	/* Set up 1st node pointing to all memory from here on up.
	 * We don't know exactly how much will be available at this point,
	 * so we just pretend we have the maximum possible.
	 */
	sbm_list = sml = sbm_nget();
	sml->smforw = sml->smback = 0;
	sml->smflags = SM_USE|SM_NID;	/* Initial flags */
	sml->smaddr = (SBMA) sml;
	sml->smlen = MAXSBMO;		/* Pretend we have lots */
	sml->smuse = (SMNODES * sizeof(struct smblk));
	/* Now split off everything above initial allocation as NXM. */
	sm = sbm_split(sml, sm->smuse);
	sml->smflags |= SM_MNODS;	/* Mark 1st node as having SM nodes */
	sm->smflags |= SM_NXM;		/* Mark 2nd node as NXM */
	/* Now possibly set up extra nodes, if stack mem is being allocated
	 * (From our viewpoint it looks as if a chunk in the middle of
	 * the initial NXM section has been declared usable)
	 */
	if(xlen)
	{	/* Allow for "extra" static stack memory */
		/* Will lose if xaddr <= 1st NXM! */
		sml = sbm_split(sm, (SBMO)(xaddr - sm->smaddr));
		sbm_split(sml, xlen);	/* Split off following NXM */
		sml->smflags &= ~(SM_USE|SM_NXM); /* This node is free mem! */
	}
	/* Now set up a small additional node which points to the NXM
	 * that we cannot get from SBRK.  At this stage, this is just
	 * a place-holder, to reserve the node so we don't have to
	 * worry about running out of nodes at the same time sbrk stops
	 * winning.
	 * SM points to the NXM that we expect SBRK to dig into.
	 */
	sbm_split(sm, sm->smlen - WDSIZE);	/* Chop off teensy bit */
	sm->smflags &= ~SM_USE;			/* Now mark NXM "free"!! */
	/* Finally, reserve an "extra" SM node for use by sbm_nget
	 * when it is allocating more freelist node chunks.
	 */
	sbm_nxtra = sbm_nget();

	return(sbm_list);
}
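/* A minimal calling sketch (hypothetical caller; "mem" is an assumed
 * name, following the "int mem[2000]" example mentioned above):
 *
 *	int mem[2000];				(declared in MAIN)
 *	sbm_init((SBMA)mem, (SBMO)sizeof(mem));	(include stack area)
 *
 * or, for normal initialization with no extra stack area:
 *
 *	sbm_init((SBMA)0, (SBMO)0);
 */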
/* SBM_NGET() - Get a free SM node.
 * Note the hair to provide a spare SM node when
 * we are allocating memory for more SM nodes.  This is necessary
 * because sbm_mget and sbm_nget call each other recursively and
 * sbm_mget cannot create any new memory without a SM node to point
 * at the allocated chunk.
 */
struct smblk *
sbm_nget()
{	register struct smblk *sm, *sml;

	if(!(sm = sbm_nfl))	/* Get a node from freelist */
	{	/* Freelist is empty, try to allocate more nodes. */

		/* Put our "spare" smblk on freelist temporarily so that
		 * sbm_mget has a chance of winning.
		 * Infinite recursion is avoided by a test
		 * in sbm_mget which checks sbm_nfl and sbm_nxtra.
		 */
		if(!(sm = sbm_nxtra))
			return(sbm_err(0,"Zero sbm_nxtra!"));
		sm->smforw = 0;		/* Spare becomes the freelist */
		sbm_nfl = sm;
		sbm_nxtra = 0;

		/* Try to allocate another chunk of SM nodes. */
		sml = sbm_nmak(sizeof(struct smblk),SM_MNODS);

		/* Put the new free nodes (if any) on freelist.
		 * Done this way because freelist may have had one or two
		 * nodes added to it by sbm_mget, so can't just stick
		 * a new pointer in sbm_nfl.
		 */
		while(sml)
		{	sm = sml->smforw;
			sbm_nfre(sml);
			sml = sm;
		}

		/* Now reserve an extra node again.
		 * It is an error if there is nothing on freelist here,
		 * because even if sbm_mget failed the "extra node" should
		 * still be on freelist.  The check for a zero sbm_nxtra
		 * above will catch such an error.
		 */
		sbm_nxtra = sbm_nget();

		/* Now see if anything to return */
		if(!(sm = sbm_nfl))	/* If freelist empty again, */
			return(0);	/* give up. */
	}
	sbm_nfl = sm->smforw;	/* If win, take it off freelist */
	return(sm);		/* Return ptr or 0 if none */
}
/* SBM_NFRE(sm) - Return a SM node to the SM freelist.
 */
sbm_nfre(smp)
struct smblk *smp;
{	register struct smblk *sm;

	(sm = smp)->smflags = 0;
	sm->smforw = sbm_nfl;
	sbm_nfl = sm;
}
/* SBM_NMAK(elsize, flag) - Make (allocate & build) a typeless node freelist.
 */
struct smblk *
sbm_nmak(elsize, flag)
unsigned elsize, flag;
{	register struct smblk *sm, *smp;
	register int cnt;

	if((sm = sbm_mget(SMNODES*elsize, SMNODES*elsize)) == 0)
		return(0);

	sm->smflags |= flag;		/* Indicate type of nodes */
	cnt = sm->smlen/elsize;		/* Find # nodes that will fit */
	sm->smuse = cnt * elsize;	/* Actual size used */
	smp = (struct smblk *)(sm->smaddr);	/* Ptr to 1st loc of mem */
	sbm_lmak(smp, (SBMO)elsize, cnt);	/* Build freelist */
	return(smp);		/* Return 1st free node.  Caller is */
				/* responsible for setting freelist ptr. */
}
/* SBM_LMAK - Build freelist of typeless nodes.
 * Note this does not allocate memory, it just converts an already
 * allocated memory area.
 */
struct smblk *
sbm_lmak(addr, elsize, num)
SBMA addr;
SBMO elsize;
int num;
{	register struct smblk *sm, *smp;

	smp = (struct smblk *) addr;
	do {	sm = smp;	/* Save ptr */
		sm->smforw = (smp = (struct smblk *)((SBMA)smp + elsize));
	} while(--num);
	sm->smforw = 0;		/* Last node points to nothing */
	return(sm);		/* Return ptr to last node */
}
/* SBM_NMOV(sm1, sm2, begp, elsize) - Move a typeless node.
 *	Copy sm1 to sm2, adjust ptrs, leave sm1 free.
 */
sbm_nmov(smp1,smp2,begp,elsize)
struct smblk *smp1, *smp2, **begp;
int elsize;
{	register struct smblk *sm;

	bcopy((SBMA)smp1,(SBMA)(sm = smp2), elsize);	/* Copy the stuff */
	if(sm->smforw) sm->smforw->smback = sm;		/* Fix up links */
	if(sm->smback) sm->smback->smforw = sm;
	else if(begp)
		*begp = sm;	/* 1st on list; fix list header */
}
/* SBM_MGET(min,max) - Get a SMBLK with specified amount of memory.
 *	Returns 0 if none available.
 *	Memory is guaranteed to start on word boundary, but may not
 * end on one.  Note that sbm_mfree is responsible for
 * ensuring that free mem starts word-aligned.
 *	A subtle but major concern of this code is the number of freelist
 * nodes gobbled by a single call.  If the freelist happens to not have
 * enough nodes, then a recursive call to sbm_mget is made (via sbm_nget)
 * in order to allocate a new batch of freelist nodes!  sbm_nget will
 * always provide a single "spare" node during such an allocation, but
 * there is only one and it is essential that sbm_mget gobble only ONE
 * (if any) during such a call, which is indicated by sbm_nxtra==0.
 *	The maximum # of freelist nodes that sbm_mget can gobble is
 * 2, when (1) NXM memory is obtained, and a SM is needed to point at
 * the new free mem, plus (2) the resulting SM is too big, and has to
 * be split up, which requires another SM for the remainder.
 *	The "used-NXM" smblk is set up at init time precisely in order to
 * avoid the necessity of creating it here when sbrk stops winning, since
 * that would require yet another freelist node and make it possible for
 * sbm_mget to gobble 3 during one call -- too many.
 *	Further note: the sbm_nfl checks are necessary in order
 * to ensure that a SM node is available for use by sbm_split.  Otherwise
 * the calls to sbm_split might create a new SM freelist by gobbling the
 * very memory which we are hoping to return!
 */
SBMO sbm_chksiz = SMCHUNKSIZ;	/* Current chunk size to feed sbrk */
struct smblk *
sbm_mget(cmin,cmax)
SBMO cmin, cmax;
{	register struct smblk *sm, *sml;
	register SBMO csiz;
	register SBMA addr, xaddr;

	if((sm = sbm_list) == 0		/* If never done, */
	  && (sm = sbm_init((SBMA)0,(SBMO)0)) == 0) /* initialize mem alloc stuff. */
		return(0);		/* Can't init??? */

	/* Round up sizes to word boundary */
	if(rndrem(cmin)) cmin = rndup(cmin);
	if(rndrem(cmax)) cmax = rndup(cmax);
	/* Search for a free block having enough memory.
	 * If run into a free-NXM block, always "win", since there may be
	 * a combination of preceding free-mem and new mem which will satisfy
	 * the request.  If it turns out this didn't work, we'll just fail
	 * a little farther on.
	 */
retry:	csiz = cmin;		/* Set size that will satisfy us */
	do {
		if( ((sm->smflags&SM_USE) == 0)
		  && ((sm->smlen >= csiz) || (sm->smflags&SM_NXM)) )
			break;
	} while(sm = sm->smforw);
	if(sm == 0)
		return(0);	/* Found none that minimum would fit */
	if(sm->smflags&SM_NXM)
	{	/* Found free area, but it's marked NXM and the system
		 * must be persuaded (via sbrk) to let us use that portion
		 * of our address space.  Grab a good-sized chunk.
		 */
		if(sbm_nfl == 0)	/* Verify a spare SM node is avail */
			goto getnod;	/* Nope, must get one. */

		/* Decide amount of mem to ask system for, via sbrk.
		 * The fine point here is the check of sbm_nxtra to make sure
		 * that, when building more freelist nodes, we don't have
		 * to use more than one SM node in the process.  If we
		 * asked for too much mem, we'd have to use a SM node
		 * to hold the excess after splitting.
		 */
		if(sbm_nxtra		/* If normal then try for big chunk */
		  && csiz < sbm_chksiz) csiz = sbm_chksiz;	/* Max */
		if(csiz > sm->smlen) csiz = sm->smlen;		/* Min */
		/* Get the NXM mem */
		if((addr = (SBMA)SBM_SBRK(csiz)) != sm->smaddr)
		{	/* Unexpected value returned from SBRK! */

			if((int)addr != 0 && (int)addr != -1)
			{	/* If returned some mem which starts outside
				 * the NXM then something is screwed up. */
				if(addr < sm->smaddr
				  || (addr >= sm->smaddr+sm->smlen))
					return(sbm_err(0,"SBRK %o != %o", addr,
							sm->smaddr));

				/* Got some mem, falls within NXM.
				 * Presumably someone else has called sbrk
				 * since last time, so we need to fence off
				 * the intervening area. */
				sm = sbm_split((sml=sm),(addr - sm->smaddr));
				sml->smflags |= SM_USE|SM_EXT;
				return(sbm_mget(cmin,cmax));
			}

			/* If value indicates couldn't get the stuff, then
			 * we have probably hit our limit and the rest of
			 * NXM should be declared "used" to prevent further
			 * hopeless sbrk calls.  We split off the portion
			 * of NXM that is known for sure to be unavailable,
			 * and mark it "used".  If a "used NXM" area already
			 * exists following this one, the two are merged.
			 * The chunk size is then reduced by half, so
			 * only log2(SMCHUNKSIZ) attempts will be made, and
			 * no more.
			 */

			/* Handle case of SBRK claiming no more memory.
			 * Gobble as much as we can, and then turn this NXM
			 * block into a free-mem block, and leave the
			 * remainder in the used-NXM block (which should
			 * immediately follow this free-NXM block!)
			 */
			if(!(sml = sm->smforw)	/* Ensure have used-NXM blk */
			  || (sml->smflags&(SM_USE|SM_NXM))
					!= (SM_USE|SM_NXM))
				return(sbm_err(0,"No uNXM node!"));
			xaddr = sm->smaddr;	/* Use this for checking */
			sm->smuse = 0;		/* Use this for sum */
			for(csiz = sm->smlen; csiz > 0;)
			{	addr = SBM_SBRK(csiz);
				if((int)addr == 0 || (int)addr == -1)
				{	csiz >>= 1;	/* Refused; try for half as much */
					continue;
				}
				if(addr != xaddr)	/* Must stay contiguous! */
					return(sbm_err(0,"SBRK %o != %o", addr,
							xaddr));
				sm->smuse += csiz;	/* Add to sum of mem gotten */
				xaddr += csiz;		/* Next chunk expected here */
				csiz = sm->smlen - sm->smuse;	/* Try for the rest */
			}

			/* Have gobbled as much from SBRK as we could.
			 * Turn the free-NXM block into a free-mem block,
			 * unless we got nothing, in which case just merge
			 * it into the used-NXM block and continue
			 * searching from this point.
			 */
			if(!(csiz = sm->smuse))	/* Get total added */
			{	sm->smflags = sml->smflags;	/* Ugh. */
				sbm_mmrg(sm);	/* Merge into used-NXM blk */
				goto retry;	/* Keep looking */
			}
			else
			{	sml->smaddr = sm->smaddr + csiz;
				sml->smlen += sm->smlen - csiz;
				sm->smlen = csiz;
				sm->smflags &= ~SM_NXM;	/* No longer NXM */
			}
		}
		/* Here when we've acquired CSIZ more memory from sbrk.
		 * If preceding mem area is not in use, merge new mem
		 * into it.
		 */
		if((sml = sm->smback) &&
		  (sml->smflags&(SM_USE|SM_NXM))==0)	/* Previous free? */
		{	sml->smlen += csiz;		/* Yes, simple! */
			sm->smaddr += csiz;		/* Fix up */
			if((sm->smlen -= csiz) == 0)	/* If no NXM left,*/
				sbm_mmrg(sml);		/* Merge NXM node w/prev */
			sm = sml;	/* Prev is now winning node */
		}
		else
		{	/* Prev node isn't a free area.  Split up the NXM
			 * node to account for acquired mem, unless we
			 * gobbled all the mem available.
			 */
			if(sm->smlen > csiz		/* Split unless all used */
			  && !sbm_split(sm,csiz))	/* Call shd always win */
				return(sbm_err(0,"getsplit err: %o",sm));
			sm->smflags &= ~SM_NXM;	/* Node is now real mem */
		}
		/* Now make a final check that we have enough memory.
		 * This can fail because SBRK may not have been able
		 * to gobble enough memory, either because (1) not
		 * as much NXM was available as we thought,
		 * or (2) we noticed the free-NXM area and immediately
		 * gambled on trying it without checking any lengths.
		 * In any case, we try again starting from the current SM
		 * because there may be more free mem higher up (eg on
		 * the list past this point).
		 */
		if(sm->smlen < cmin)
			goto retry;
	}
	/* Check to see if node has too much mem.  This is especially true
	 * for memory just acquired via sbrk, which gobbles a huge chunk each
	 * time.  If there's too much, we split up the area.
	 */
	if(sm->smlen > cmax+FUDGE)	/* Got too much? (Allow some fudge)*/
	{	/* Yes, split up so don't gobble too much. */
		if(sbm_nfl)			/* If success guaranteed, */
			sbm_split(sm,cmax);	/* split it, all's well. */
		else goto getnod;		/* Else must refill freelist. */
	}
	sm->smflags |= SM_USE;	/* Finally seize it by marking "in-use". */
	return(sm);
	/* Come here when we will need to get another SM node but the
	 * SM freelist is empty.  We have to forget about using the area
	 * we just found, because sbm_nget may gobble it for the
	 * freelist.  So, we first force a refill of the freelist, and then
	 * invoke ourselves again on what's left.
	 */
getnod:	if(sml = sbm_nget())	/* Try to build freelist */
	{	sbm_nfre(sml);	/* Won, give node back, */
		sm = sbm_list;	/* and retry, starting over! */
		goto retry;
	}

	/* Failed.  Not enough memory for both this request
	 * and one more block of SM nodes.  Since such a SM_MNODS
	 * block isn't very big, we are so close to the limits that it
	 * isn't worth trying to do something fancy here to satisfy the
	 * original request.  So we just fail.
	 */
	return(0);
}
/* Code for debugging stuff by imposing an artificial limitation on size
 * of available memory.
 */
SBMO sbm_dlim = MAXSBMO;	/* Amount of mem to allow (default is max) */

char *
sbm_brk(size)
unsigned size;
{	register char *addr;

	if(size > sbm_dlim) return(0);
	addr = sbrk(size);
	if((int)addr == 0 || (int)addr == -1)
		return(0);
	sbm_dlim -= size;
	return(addr);
}
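/* E.g. a test program might begin with (illustrative assumption; any
 * value will do):
 *
 *	sbm_dlim = 4096;	(pretend only 4K bytes are available)
 *
 * so that allocation-failure paths can be exercised without actually
 * exhausting real memory.
 */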
/* SBM_MFREE(sm) - Free up an allocated memory area.
 */
sbm_mfree(sm)
register struct smblk *sm;
{	register struct smblk *smx;
	register SBMO crem;

	sm->smflags &= ~SM_USE;			/* Say mem is free */

	if((smx = sm->smback)			/* Check preceding mem */
	  && (smx->smflags&(SM_USE|SM_NXM))==0)	/* If it's free, */
		sbm_mmrg(sm = smx);		/* then merge 'em. */
	if((smx = sm->smforw)			/* Check following mem */
	  && (smx->smflags&(SM_USE|SM_NXM))==0)	/* Again, if free, */
		sbm_mmrg(sm);			/* merge them. */

	if(sm->smlen == 0)		/* Just in case, chk for null blk */
	{	if(smx = sm->smback)	/* If pred exists, */
			sbm_mmrg(smx);	/* merge quietly. */
		else
		{	sbm_list = sm->smforw;	/* 1st node on list, so */
			sbm_nfre(sm);		/* simply flush it. */
		}
		return;
	}

	/* This code is slightly over-general for some machines.
	 * The pointer subtraction is done in order to get a valid integer
	 * offset value regardless of the internal representation of a pointer.
	 * We cannot reliably force alignment via casts; some C implementations
	 * treat that as a no-op.
	 */
	if(crem = rndrem(sm->smaddr - sbm_lowaddr))	/* On word bndry? */
	{	/* No -- must adjust.  All free mem blks MUST, by fiat,
		 * start on word boundary.  Here we fix things by
		 * making the leftover bytes belong to the previous blk,
		 * no matter what it is used for.  Prev blk is guaranteed to
		 * (1) Exist (this cannot be 1st blk since 1st is known to
		 * start on wd boundary) and to be (2) Non-free (else it would
		 * have been merged with this one).
		 */
		if((smx = sm->smback) == 0)	/* Get ptr to prev blk */
		{	sbm_err(0,"Align err");	/* Catch screws */
			return;
		}
		crem = WDSIZE - crem;	/* Find # bytes to flush */
		if(crem >= sm->smlen)	/* Make sure node has that many */
		{	sbm_mmrg(smx);	/* Flush node to avoid zero length */
			return;
		}
		smx->smlen += crem;	/* Make stray bytes part of prev */
		sm->smaddr += crem;	/* And flush from current. */
		sm->smlen -= crem;
	}
}
/* SBM_EXP - Expand (or shrink) size of an allocated memory chunk.
 *	"nsize" is desired new size; may be larger or smaller than current
 * size.
 */
struct smblk *
sbm_exp(sm, size)
register struct smblk *sm;
SBMO size;
{	register struct smblk *smf;
	register SBMO mexp, pred, succ;

	if(sm->smlen >= size)		/* Do we want truncation? */
		goto realo2;		/* Yup, go split block */

	/* Block is expanding. */
	mexp = size - sm->smlen;	/* Get # bytes to expand by */
	pred = succ = 0;
	if((smf = sm->smforw)		/* See if free mem follows */
	  && (smf->smflags&(SM_USE|SM_NXM)) == 0)
		if((succ = smf->smlen) >= mexp)
			goto realo1;	/* Quick stuff if succ OK */

	if((smf = sm->smback)		/* See if free mem precedes */
	  && (smf->smflags&(SM_USE|SM_NXM)) == 0)
		pred = smf->smlen;

	/* If not enough free space combined on both sides of this chunk,
	 * we have to look for a completely new block.
	 */
	if(pred + sm->smlen + succ < size)
	{	if((smf = sbm_mget(size,size)) == 0)
			return(0);	/* Couldn't find one */
		else pred = 0;		/* Won, indicate new block */
	}

	/* OK, must copy either into new block or down into predecessor
	 * (overlap is OK as long as bcopy moves 1st byte first)
	 */
	bcopy(sm->smaddr, smf->smaddr, sm->smlen);
	smf->smflags = sm->smflags;	/* Copy extra attribs */
	smf->smuse = sm->smuse;
	if(!pred)		/* If invoked sbm_mget */
	{	sbm_mfree(sm);	/* then must free up old area */
		return(smf);	/* and can return immediately. */
	}
	sbm_mmrg(smf);		/* Merge current into pred blk */
	sm = smf;		/* Now pred is current blk. */
	if(succ)
realo1:		sbm_mmrg(sm);	/* Merge succ into current blk */
realo2:	if(sm->smlen > size	/* If now have too much, */
	  && sbm_split(sm, size))	/* split up and possibly */
		sbm_mfree(sm->smforw);	/* free up unused space. */

	/* Note that sbm_split can fail if it can't get a free node,
	 * which is only possible if we are reducing the size of an area.
	 * If it fails, we just return anyway without truncating the area.
	 */
	return(sm);
}
/* SBM_MMRG(sm) - Merge a memory area with the area following it.
 *	The node (and memory area) following the SM pointed to are
 * merged in and the successor node freed up.  The flags
 * and smuse of the current SM (which is not moved or anything)
 * remain valid.
 */
sbm_mmrg(smp)
struct smblk *smp;
{	register struct smblk *sm, *sm2;

	sm = smp;
	sm->smlen += (sm2 = sm->smforw)->smlen;	/* Add succ's len */
	if(sm->smforw = sm2->smforw)		/* and fix linkages */
		sm->smforw->smback = sm;
	sbm_nfre(sm2);			/* now can flush succ node */
}
/* SBM_SPLIT - Split up an area (gets a new smblk to point to split-off
 * portion).
 *	Note returned value is ptr to 2nd smblk, since this is a new one.
 * Ptr to 1st remains valid since original smblk stays where it is.
 * NOTE: Beware of splitting up free mem (SM_USE == 0) since sbm_nget may
 * steal it out from under unless precautions are taken!  See comments
 * at sbm_mget related to this.
 */
struct smblk *
sbm_split(smp, coff)
struct smblk *smp;
SBMO coff;
{	register struct smblk *sm, *smx;
	register SBMO csiz;

	if((sm = smp)->smlen <= (csiz = coff))
		return(0);
	if((smx = sbm_nget()) == 0)
		return(0);
	smx->smlen = sm->smlen - csiz;		/* Set 2nd size */
	smx->smaddr = sm->smaddr + csiz;	/* Set 2nd addr */
	sm->smlen = csiz;			/* Take from 1st size */
	smx->smflags = sm->smflags;		/* Copy flags */
	if(smx->smforw = sm->smforw)		/* Splice 2nd after 1st */
		smx->smforw->smback = smx;
	smx->smback = sm;
	sm->smforw = smx;			/* Put 2nd into chain */
	return(smx);			/* Return ptr to 2nd smblk */
}
#if 0	/* Replaced by "bcopy" for system-dep efficiency */
/* SBM_SCPY - Copy string of bytes.  Somewhat machine-dependent;
 *	Tries to be clever about using word moves instead of byte moves.
 */
sbm_scpy(from, to, count)	/* Copy count bytes from -> to */
char *from, *to;
unsigned count;
{	register char *s1, *s2;
	register unsigned cnt;
	int tmp;

	if((cnt = count) == 0)
		return;
	s1 = from;
	s2 = to;
	while(rndrem((int)s1))		/* Get 1st ptr aligned */
	{	*s2++ = *s1++;
		if(--cnt == 0) return;
	}
	if(rndrem((int)s2) == 0)	/* Do wd move if ptr 2 now aligned */
	{
#ifdef DUMBPCC	/* Code for dumber (Portable C type) compiler */
		register WORD *ap, *bp;

		tmp = cnt;
		ap = (WORD *)s1;
		bp = (WORD *)s2;
		if(cnt = rnddiv(cnt))
			do { *bp++ = *ap++; }
			while(--cnt);
		if((cnt = rndrem(tmp)) == 0)
			return;
		s1 = (char *)ap;
		s2 = (char *)bp;
#else
		/* Tight loop for efficient copying on 11s */
		tmp = cnt;
		if(cnt = rnddiv(cnt))
			do { *((WORD *)s2)++ = *((WORD *)s1)++; }
			while(--cnt);
		if((cnt = rndrem(tmp)) == 0)
			return;
#endif
	}
	do { *s2++ = *s1++; }	/* Finish up with byte loop */
	while(--cnt);
}
#endif
/* SBM_ERR(val, str, args) - Error reporting routine.
 */
struct smblk *	/* If it returns at all, this is most common type */
sbm_err(val,str,a0,a1,a2,a3)
struct smblk *val;
char *str;
{	register int *sptr;

	sptr = (int *) &sptr;	/* Point to self on stack */
	sptr += 5;		/* Point to return addr */
	if((int)sbm_debug==1)
		abort();	/* Value 1: just die (assumed) */
	if(sbm_debug)
		(*sbm_debug)(0,*sptr,str,a0,a1,a2,a3);
	return(val);
}
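/* E.g. a user program might furnish a handler like this (an illustrative
 * sketch only; the handler name and message format are assumptions):
 *
 *	char *my_errhdl(zero, retaddr, str, a0, a1, a2, a3)
 *	int zero, retaddr; char *str;
 *	{	printf("SBM error at %o: ", retaddr);
 *		printf(str, a0, a1, a2, a3);
 *		printf("\n");
 *		return(0);
 *	}
 *
 * enabled by "sbm_debug = my_errhdl;".  The special value 1 is handled
 * separately above.
 */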
/* These routines correspond to the V7 LIBC routines as described
 * in the V7 UPM (3).  They should provide satisfactory emulation
 * if the documentation is correct.  Replacement is necessary since
 * the SBM routines are jealous and cannot tolerate competition for
 * calls of SBRK; i.e. the memory being managed must be contiguous.
 */

/* MALLOC(size) - Allocate a memory area.
 *	Guaranteed to return word-aligned pointer to area of AT LEAST
 * requested size.  Area size is rounded up to word boundary.
 */
char *
malloc(size)
unsigned size;
{	register struct smblk *sm, **sma;
	register SBMO siz;

	siz = rndup(size + sizeof (struct smblk *));	/* Make room for ptr */
	if((sm = sbm_mget(siz,siz)) == 0)
		return(0);
	*(sma = (struct smblk **)sm->smaddr) = sm;	/* Store ptr in addr-1 */
	return((char *)++sma);
}
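/* Usage is as for the V7 routines, e.g. (illustrative only):
 *
 *	char *p = malloc(100);	(at least 100 bytes, word-aligned)
 *	p = realloc(p, 200);	(grow, copying if neighbors are in use)
 *	free(p);
 *
 * Internally the returned pointer sits one word past a hidden smblk
 * pointer, which is how free() and realloc() below recover the node.
 */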
char *
alloc(size)	/* For V6 programs - note different failure value! */
unsigned size;
{	register char *addr;
	return((addr = malloc(size)) ? addr : (char *) -1);
}
/* FREE(ptr) - Return a malloc'd area to the pool.
 */
free(ptr)
char *ptr;
{	register struct smblk *sm, **smp;

	smp = &((struct smblk **)ptr)[-1];	/* Point to addr-1 */
	sm = *smp;				/* Pluck SM ptr therefrom */
	if(((sm->smflags&0377) != SM_NID) || sm->smaddr != (SBMA)smp)
		return((int)sbm_err(0,"free: bad arg %o", ptr));
	sbm_mfree(sm);
	return(1);
}
/* REALLOC(ptr, size) - Change the size of a malloc'd area.
 */
char *
realloc(ptr,size)
char *ptr;
unsigned size;
{	register struct smblk *sm, **smp;

	smp = &((struct smblk **)ptr)[-1];	/* Point to addr-1 */
	sm = *smp;				/* Pluck SM ptr therefrom */
	if(((sm->smflags&0377) != SM_NID) || (sm->smaddr != (SBMA)smp))
		return((char *)sbm_err(0,"realloc: bad arg %o",ptr));
	if((sm = sbm_exp(sm, rndup(size+(sizeof(struct smblk *))))) == 0)
		return(0);
	*(smp = (struct smblk **)sm->smaddr) = sm;	/* Save smblk ptr */
	return((char *)++smp);
}
/* CALLOC(nelem, elsize) - Allocate and zero an array.
 */
char *
calloc(nelem, elsize)
unsigned nelem, elsize;
{	register SBMO cmin;
	register WORD *ip;	/* Clear in units of words */
	register char *addr;

	if((cmin = nelem*elsize) == 0		/* Find # bytes to get */
	  || (addr = malloc(cmin)) == 0)	/* Get it */
		return(0);
	ip = (WORD *) addr;		/* Set up ptr to area */
	cmin = rnddiv(cmin+WDSIZE-1);	/* Find # words to clear */
	do { *ip++ = 0; } while (--cmin);	/* Zap the area */
	return(addr);
}
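/* Worked example (assuming 2-byte words, i.e. WDSIZE == 2, and
 * rnddiv(n) == n/WDSIZE): calloc(3,3) asks malloc for 9 bytes and
 * then clears rnddiv(9+1) = 5 words, covering the whole word-rounded
 * area.
 */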
/* SBM_NGC() - Specific routine for GC'ing SMBLK nodes.
 *
 * SBM_XNGC(begp, elsize, type) - Compact nodes of specified type.
 *	Scans allocated mem from low to high to find chunks with nodes of
 * the specified type.
 *	Flushes current freelist and rebuilds it as scan progresses,
 * such that 1st thing on list is lowest-addr node.  When a node is
 * seen that can be moved, new node is acquired from freelist if
 * it exists, otherwise no move is made.  If a chunk has been scanned
 * and no active nodes remain, it is flushed and freelist updated.
 * NOTE: This has not yet been verified to work with nodes of any
 * type other than SMBLK.
 */
sbm_ngc()
{	register struct smblk *sm;

	if(!(sm = sbm_nxtra))
		return((int)sbm_err(0,"Zero sbm_nxtra"));
	sm->smflags |= SM_USE;		/* Ensure this one isn't GC'd */
	sbm_xngc(&sbm_nfl, sizeof(struct smblk), SM_MNODS);
	sm->smflags = 0;		/* Flush temporary crock */
}
sbm_xngc(begp, elsize, flag)
struct smblk **begp;
unsigned elsize, flag;
{	register struct smblk *sm, *chk, *smf;
	struct smblk *ftail, *savtail;
	register int cnt;
	int inuse;

	*begp = ftail = 0;		/* Flush node freelist */
	for(chk = sbm_list; chk; chk = chk->smforw)
	    if(chk->smflags&flag)
	    {	sm = (struct smblk *) chk->smaddr;
		cnt = (chk->smuse)/elsize;
		inuse = 0;
		savtail = ftail;
		smf = *begp;	/* Set up ptr to 1st freelist node */
		while(--cnt >= 0)
		{	/* Here decide if movable */
			if(sm->smflags && smf	/* Live and have copy place */
			  && ((sm->smflags&SM_USE) == 0	/* Free mem? */
			    || (sm->smflags&(SM_MNODS|SM_DNODS)))
			  && sm->smback)	/* has backptr (see ncpy) */
			{	/* Move the node */
				*begp = smf->smforw;	/* Get free node */
				if(*begp == 0)	/* If freelist now empty, */
					ftail = 0;	/* fix tail ptr too. */

				/* Move node.  Already checked for back ptr
				 * of 0 since no obvious way to tell where
				 * the ptr to list is kept.  Sigh.
				 */
				sbm_nmov(sm,smf,(struct smblk **)0,elsize);

				/* Get ptr to new freelist node.  Note
				 * check to ensure that it is not in this
				 * same chunk (if it is, no point in moving
				 * any more nodes).
				 */
				if((smf = *begp) >= chk)
					smf = 0;	/* Zero if same chk */
				sm->smflags = 0;	/* Make node free */
			}

			/* At this point, not movable */
			if(sm->smflags == 0)	/* Free node? */
			{	if(ftail)	/* Add to freelist */
					ftail->smforw = sm;
				else *begp = sm;
				sm->smforw = 0;
				ftail = sm;
			}
			else inuse++;
			sm = (struct smblk *)((SBMA)sm + elsize);
		}
		if(inuse == 0			/* All free? */
		  && (sm = chk->smback))	/* & not 1st? */
		{	if(savtail)		/* Edit freelist */
				(ftail = savtail)->smforw = 0;
			else *begp = ftail = 0;
			sbm_mfree(chk);		/* Flush empty chunk */
			chk = sm;		/* Resume scan from pred */
		}
	    }
}
/* SBM_NFOR(flag, nodsiz, proc, arg) - Apply a procedure to every node
 * of the specified type (scans all chunks flagged with "flag").
 *	Note that proc must return a zero value, or loop aborts and
 * returns that selfsame value.
 */
sbm_nfor(flag,nodsiz,proc,arg)
int flag, nodsiz;
int (*proc)();
{	register struct smblk *sm, *np;
	register int cnt;
	int res;

	for(sm = sbm_list; sm; sm = sm->smforw)
	    if(sm->smflags&flag)
	    {	np = (struct smblk *) sm->smaddr;
		cnt = sm->smuse/nodsiz;
		do {	if(np->smflags)		/* Live node? */
				if(res = (*proc)(np,arg))
					return(res);
			np = (struct smblk *)((SBMA)np + nodsiz);
		} while(--cnt);
	    }
	return(0);
}
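/* E.g. one might count live smblk nodes like so (illustrative sketch
 * only; "cntproc" and its counter argument are assumptions):
 *
 *	int cntproc(np, arg)
 *	struct smblk *np;
 *	int *arg;
 *	{	(*arg)++;
 *		return(0);	(zero: keep going; nonzero would abort)
 *	}
 *
 *	int n = 0;
 *	sbm_nfor(SM_MNODS, sizeof(struct smblk), cntproc, &n);
 */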