/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */

#include "alloc_arena.hxx"

#include "alloc_impl.hxx"
#include "internal/rtllifecycle.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#include <cassert>
#include <string.h>
#include <stdio.h>

extern AllocMode alloc_mode;

/* ================================================================= *
 *
 * arena internals.
 *
 * ================================================================= */

struct rtl_arena_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_arena_type       m_arena_head;
};

static rtl_arena_list_st g_arena_list;

/** gp_arena_arena
 *  provided for arena_type allocations, and hash_table resizing.
 */
static rtl_arena_type * gp_arena_arena = 0;

/** gp_machdep_arena
 *  Low level virtual memory (pseudo) arena
 *  (platform dependent implementation)
 */
static rtl_arena_type * gp_machdep_arena = 0;

rtl_arena_type * gp_default_arena = 0;

static void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size *       pSize
);

static void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void *           pAddr,
    sal_Size         nSize
);

static sal_Size
rtl_machdep_pagesize();

/* ================================================================= */

/** rtl_arena_segment_constructor()
 */
static int
rtl_arena_segment_constructor (void * obj)
{
    rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);

    QUEUE_START_NAMED(segment, s);
    QUEUE_START_NAMED(segment, f);

    return (1);
}

/** rtl_arena_segment_destructor()
 */
static void
rtl_arena_segment_destructor (void * obj)
{
    rtl_arena_segment_type * segment = static_cast< rtl_arena_segment_type * >(
        obj);
    assert(QUEUE_STARTED_NAMED(segment, s));
    assert(QUEUE_STARTED_NAMED(segment, f));
    (void) segment; // avoid warnings
}

/* ================================================================= */

/** rtl_arena_segment_populate()
 *
 *  @precond arena->m_lock acquired.
 */
static int
rtl_arena_segment_populate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type *span;
    sal_Size                size = rtl_machdep_pagesize();

    span = static_cast< rtl_arena_segment_type * >(
        rtl_machdep_alloc(gp_machdep_arena, &size));
    if (span != 0)
    {
        rtl_arena_segment_type *first, *last, *head;
        sal_Size                count = size / sizeof(rtl_arena_segment_type);

        /* insert onto reserve span list */
        QUEUE_INSERT_TAIL_NAMED(&(arena->m_segment_reserve_span_head), span, s);
        QUEUE_START_NAMED(span, f);
        span->m_addr = (sal_uIntPtr)(span);
        span->m_size = size;
        span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;

        /* insert onto reserve list */
        head = &(arena->m_segment_reserve_head);
        for (first = span + 1, last = span + count; first < last; ++first)
        {
            QUEUE_INSERT_TAIL_NAMED(head, first, s);
            QUEUE_START_NAMED(first, f);
        }
    }
    return (span != 0);
}

/** rtl_arena_segment_get()
 *
 *  @precond arena->m_lock acquired.
 *  @precond (*ppSegment == 0)
 */
static void
rtl_arena_segment_get (
    rtl_arena_type *           arena,
    rtl_arena_segment_type  ** ppSegment
)
{
    rtl_arena_segment_type * head;

    assert(*ppSegment == 0);

    head = &(arena->m_segment_reserve_head);
    if ((head->m_snext != head) || rtl_arena_segment_populate (arena))
    {
        (*ppSegment) = head->m_snext;
        QUEUE_REMOVE_NAMED((*ppSegment), s);
    }
}

/** rtl_arena_segment_put()
 *
 *  @precond arena->m_lock acquired.
 *  @postcond (*ppSegment == 0)
 */
static void
rtl_arena_segment_put (
    rtl_arena_type *           arena,
    rtl_arena_segment_type  ** ppSegment
)
{
    rtl_arena_segment_type * head;

    assert(QUEUE_STARTED_NAMED((*ppSegment), s));
    assert(QUEUE_STARTED_NAMED((*ppSegment), f));

    (*ppSegment)->m_addr = 0;
    (*ppSegment)->m_size = 0;

    assert((*ppSegment)->m_type != RTL_ARENA_SEGMENT_TYPE_HEAD);
    (*ppSegment)->m_type = 0;

    /* keep as reserve */
    head = &(arena->m_segment_reserve_head);
    QUEUE_INSERT_HEAD_NAMED(head, (*ppSegment), s);

    /* forget it */
    (*ppSegment) = 0;
}

/** rtl_arena_freelist_insert()
 *
 *  @precond arena->m_lock acquired.
 */
static void
rtl_arena_freelist_insert (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type * head;

    head = &(arena->m_freelist_head[highbit(segment->m_size) - 1]);
    QUEUE_INSERT_TAIL_NAMED(head, segment, f);

    arena->m_freelist_bitmap |= head->m_size;
}

/** rtl_arena_freelist_remove()
 *
 *  @precond arena->m_lock acquired.
 */
static void
rtl_arena_freelist_remove (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    if ((segment->m_fnext->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD) &&
        (segment->m_fprev->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD)    )
    {
        rtl_arena_segment_type * head;

        head = segment->m_fprev;
        assert(arena->m_freelist_bitmap & head->m_size);
        arena->m_freelist_bitmap ^= head->m_size;
    }
    QUEUE_REMOVE_NAMED(segment, f);
}

/* ================================================================= */

/** RTL_ARENA_HASH_INDEX()
 */
#define RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_ARENA_HASH_INDEX(arena, addr) \
    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))

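/*
 * Worked example with illustrative values only: for an arena with
 * m_quantum_shift == 4 (quantum 16), m_hash_size == 64 and therefore
 * m_hash_shift == 6, an allocation at addr == 0x12340 hashes to
 *
 *   a + (a >> 6) + (a >> 12) == 0x12340 + 0x48d + 0x12 == 0x127df
 *   (0x127df >> 4) & 63      == 0x127d & 63            == 61
 *
 * i.e. the low-order quantum bits are shifted out and two shifted copies
 * of the address are mixed in before masking to the table size.
 */
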
/** rtl_arena_hash_rescale()
 *
 *  @precond arena->m_lock released.
 */
static void
rtl_arena_hash_rescale (
    rtl_arena_type * arena,
    sal_Size         new_size
)
{
    rtl_arena_segment_type ** new_table;
    sal_Size                  new_bytes;

    new_bytes = new_size * sizeof(rtl_arena_segment_type*);
    new_table = (rtl_arena_segment_type **)rtl_arena_alloc (gp_arena_arena, &new_bytes);

    if (new_table != 0)
    {
        rtl_arena_segment_type ** old_table;
        sal_Size                  old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

        old_table = arena->m_hash_table;
        old_size  = arena->m_hash_size;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_arena_hash_rescale(" << arena->m_name << "): nseg: "
        //  << (arena->m_stats.m_alloc - arena->m_stats.m_free) << " (ave: "
        //  << ((arena->m_stats.m_alloc - arena->m_stats.m_free)
        //      >> arena->m_hash_shift)
        //  << "), frees: " << arena->m_stats.m_free << " [old_size: "
        //  << old_size << ", new_size: " << new_size << ']');

        arena->m_hash_table = new_table;
        arena->m_hash_size  = new_size;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_arena_segment_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_arena_segment_type  * next = curr->m_fnext;
                rtl_arena_segment_type ** head;

                head = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, curr->m_addr)]);
                curr->m_fnext = (*head);
                (*head) = curr;

                curr = next;
            }
        }

        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

        if (old_table != arena->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_arena_segment_type*);
            rtl_arena_free (gp_arena_arena, old_table, old_bytes);
        }
    }
}

/** rtl_arena_hash_insert()
 *  ...and update stats.
 */
static void
rtl_arena_hash_insert (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type ** ppSegment;

    ppSegment = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, segment->m_addr)]);

    segment->m_fnext = (*ppSegment);
    (*ppSegment) = segment;

    arena->m_stats.m_alloc     += 1;
    arena->m_stats.m_mem_alloc += segment->m_size;
}

/** rtl_arena_hash_remove()
 *  ...and update stats.
 */
rtl_arena_segment_type *
rtl_arena_hash_remove (
    rtl_arena_type * arena,
    sal_uIntPtr      addr,
    sal_Size         size
)
{
    rtl_arena_segment_type *segment, **segpp;
    sal_Size lookups = 0;

    segpp = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, addr)]);
    while ((segment = *segpp) != 0)
    {
        if (segment->m_addr == addr)
        {
            *segpp = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
            break;
        }

        /* update lookup miss stats */
        lookups += 1;
        segpp = &(segment->m_fnext);
    }

    assert(segment != 0); // bad free
    if (segment != 0)
    {
        assert(segment->m_size == size);
        (void) size; // avoid warnings

        arena->m_stats.m_free      += 1;
        arena->m_stats.m_mem_alloc -= segment->m_size;

        if (lookups > 1)
        {
            sal_Size nseg = (sal_Size)(arena->m_stats.m_alloc - arena->m_stats.m_free);
            if (nseg > 4 * arena->m_hash_size)
            {
                if (!(arena->m_flags & RTL_ARENA_FLAG_RESCALE))
                {
                    sal_Size ave = nseg >> arena->m_hash_shift;
                    sal_Size new_size = arena->m_hash_size << (highbit(ave) - 1);

                    arena->m_flags |= RTL_ARENA_FLAG_RESCALE;
                    RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
                    rtl_arena_hash_rescale (arena, new_size);
                    RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                    arena->m_flags &= ~RTL_ARENA_FLAG_RESCALE;
                }
            }
        }
    }

    return (segment);
}

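/*
 * Illustrative numbers, not taken from a real trace: rescaling is only
 * considered after a lookup that had to walk past other entries in its
 * bucket, and only once the number of live segments (m_alloc - m_free)
 * exceeds four times the current table size.  With m_hash_size == 64
 * (m_hash_shift == 6) and 300 live segments, nseg > 256 holds,
 * ave == 300 >> 6 == 4, and the table grows to
 * m_hash_size << (highbit(4) - 1) == 64 << 2 == 256 buckets before the
 * arena lock is re-acquired.
 */
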
/* ================================================================= */

/** rtl_arena_segment_alloc()
 *  allocate (and remove) segment from freelist
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 */
static int
rtl_arena_segment_alloc (
    rtl_arena_type *           arena,
    sal_Size                   size,
    rtl_arena_segment_type  ** ppSegment
)
{
    int index = 0;

    assert(*ppSegment == 0);
    if (!RTL_MEMORY_ISP2(size))
    {
        int msb = highbit(size);
        if (RTL_ARENA_FREELIST_SIZE == sal::static_int_cast< size_t >(msb))
        {
            /* highest possible freelist: fall back to first fit */
            rtl_arena_segment_type *head, *segment;

            head = &(arena->m_freelist_head[msb - 1]);
            for (segment = head->m_fnext; segment != head; segment = segment->m_fnext)
            {
                if (segment->m_size >= size)
                {
                    /* allocate first fit segment */
                    (*ppSegment) = segment;
                    break;
                }
            }
            goto dequeue_and_leave;
        }

        /* roundup to next power of 2 */
        size = (1UL << msb);
    }

    index = lowbit(RTL_MEMORY_P2ALIGN(arena->m_freelist_bitmap, size));
    if (index > 0)
    {
        /* instant fit: allocate first free segment */
        rtl_arena_segment_type * head;

        head = &(arena->m_freelist_head[index - 1]);
        (*ppSegment) = head->m_fnext;
        assert((*ppSegment) != head);
    }

dequeue_and_leave:
    if (*ppSegment != 0)
    {
        /* remove from freelist */
        rtl_arena_freelist_remove (arena, (*ppSegment));
    }
    return (*ppSegment != 0);
}

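/*
 * Illustrative sketch of the instant-fit lookup above, with made-up
 * values: for a power-of-two request size == 0x100 and a freelist
 * bitmap of 0x1420 (buckets for 0x20, 0x400 and 0x1000 are non-empty),
 *
 *   RTL_MEMORY_P2ALIGN(0x1420, 0x100) == 0x1400   (clear bits below size)
 *   lowbit(0x1400) == 11                          -> m_freelist_head[10]
 *
 * i.e. the smallest freelist guaranteed to hold a segment of at least
 * the requested size is found with two bit operations, no list search.
 */
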
/** rtl_arena_segment_create()
 *  import new (span) segment from source arena
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 */
static int
rtl_arena_segment_create (
    rtl_arena_type *           arena,
    sal_Size                   size,
    rtl_arena_segment_type  ** ppSegment
)
{
    assert((*ppSegment) == 0);
    if (arena->m_source_alloc != 0)
    {
        rtl_arena_segment_get (arena, ppSegment);
        if (*ppSegment != 0)
        {
            rtl_arena_segment_type * span = 0;
            rtl_arena_segment_get (arena, &span);
            if (span != 0)
            {
                /* import new span from source arena */
                RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                span->m_size = size;
                span->m_addr = (sal_uIntPtr)(arena->m_source_alloc)(
                    arena->m_source_arena, &(span->m_size));

                RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                if (span->m_addr != 0)
                {
                    /* insert onto segment list, update stats */
                    span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
                    QUEUE_INSERT_HEAD_NAMED(&(arena->m_segment_head), span, s);
                    arena->m_stats.m_mem_total += span->m_size;

                    (*ppSegment)->m_addr = span->m_addr;
                    (*ppSegment)->m_size = span->m_size;
                    (*ppSegment)->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                    QUEUE_INSERT_HEAD_NAMED(span, (*ppSegment), s);

                    /* report success */
                    return (1);
                }
                rtl_arena_segment_put (arena, &span);
            }
            rtl_arena_segment_put (arena, ppSegment);
        }
    }
    return (0);
}

/** rtl_arena_segment_coalesce()
 *  mark as free and join with adjacent free segment(s)
 *
 *  @precond arena->m_lock acquired
 *  @precond segment marked 'used'
 */
static void
rtl_arena_segment_coalesce (
    rtl_arena_type *         arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type *next, *prev;

    /* mark segment free */
    assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_USED);
    segment->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;

    /* try to merge w/ next segment */
    next = segment->m_snext;
    if (next->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        assert(segment->m_addr + segment->m_size == next->m_addr);
        segment->m_size += next->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, next);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(next, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &next);
    }

    /* try to merge w/ prev segment */
    prev = segment->m_sprev;
    if (prev->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        assert(prev->m_addr + prev->m_size == segment->m_addr);
        segment->m_addr  = prev->m_addr;
        segment->m_size += prev->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, prev);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(prev, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &prev);
    }
}

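/*
 * Worked example with made-up addresses: freeing the middle segment of
 *
 *   [0x10000, +0x100 free][0x10100, +0x200 used][0x10300, +0x100 free]
 *
 * first merges the right neighbour (size becomes 0x300), then the left
 * one (addr becomes 0x10000, size 0x400), releasing both neighbouring
 * descriptors back to the reserve list, so the segment list never holds
 * two adjacent free segments.
 */
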
/* ================================================================= */

/** rtl_arena_constructor()
 */
static void
rtl_arena_constructor (void * obj)
{
    rtl_arena_type * arena = (rtl_arena_type*)(obj);
    rtl_arena_segment_type * head;
    sal_Size i;

    memset (arena, 0, sizeof(rtl_arena_type));

    QUEUE_START_NAMED(arena, arena_);

    (void) RTL_MEMORY_LOCK_INIT(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_reserve_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);
        rtl_arena_segment_constructor (head);

        head->m_size = (1UL << i);
        head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
    }

    arena->m_hash_table = arena->m_hash_table_0;
    arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
    arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
}

/** rtl_arena_destructor()
 */
static void
rtl_arena_destructor (void * obj)
{
    rtl_arena_type * arena = (rtl_arena_type*)(obj);
    rtl_arena_segment_type * head;
    sal_Size i;

    assert(QUEUE_STARTED_NAMED(arena, arena_));

    RTL_MEMORY_LOCK_DESTROY(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_reserve_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);

        assert(head->m_size == (1UL << i));
        assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);

        rtl_arena_segment_destructor (head);
    }

    assert(arena->m_hash_table == arena->m_hash_table_0);
    assert(arena->m_hash_size  == RTL_ARENA_HASH_SIZE);
    assert(
        arena->m_hash_shift ==
        sal::static_int_cast< unsigned >(highbit(arena->m_hash_size) - 1));
}

/* ================================================================= */

/** rtl_arena_activate()
 */
static rtl_arena_type *
rtl_arena_activate (
    rtl_arena_type *   arena,
    const char *       name,
    sal_Size           quantum,
    sal_Size           quantum_cache_max,
    rtl_arena_type *   source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size)
)
{
    assert(arena != 0);
    if (arena != 0)
    {
        (void) snprintf (arena->m_name, sizeof(arena->m_name), "%s", name);

        if (!RTL_MEMORY_ISP2(quantum))
        {
            /* roundup to next power of 2 */
            quantum = (1UL << highbit(quantum));
        }
        quantum_cache_max = RTL_MEMORY_P2ROUNDUP(quantum_cache_max, quantum);

        arena->m_quantum = quantum;
        arena->m_quantum_shift = highbit(arena->m_quantum) - 1;
        arena->m_qcache_max = quantum_cache_max;

        arena->m_source_arena = source_arena;
        arena->m_source_alloc = source_alloc;
        arena->m_source_free  = source_free;

        if (arena->m_qcache_max > 0)
        {
            char namebuf[RTL_ARENA_NAME_LENGTH + 1];
            int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);

            sal_Size size = n * sizeof(rtl_cache_type*);
            arena->m_qcache_ptr = (rtl_cache_type**)rtl_arena_alloc (gp_arena_arena, &size);
            if (!(arena->m_qcache_ptr))
            {
                /* out of memory */
                return (0);
            }
            for (i = 1; i <= n; i++)
            {
                size = i * arena->m_quantum;
                (void) snprintf (namebuf, sizeof(namebuf), "%s_%lu", arena->m_name, size);
                arena->m_qcache_ptr[i - 1] = rtl_cache_create(namebuf, size, 0, NULL, NULL, NULL, NULL, arena, RTL_CACHE_FLAG_QUANTUMCACHE);
            }
        }

        /* insert into arena list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_arena_list.m_arena_head), arena, arena_);
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    return (arena);
}

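/*
 * Illustrative sketch with made-up parameters: activating an arena with
 * quantum == 24 and quantum_cache_max == 100 first rounds the quantum up
 * to 32 (next power of two) and the cache limit up to 128 (next multiple
 * of the quantum), so m_quantum_shift == 5 and four quantum caches are
 * created, named "<arena>_32", "<arena>_64", "<arena>_96" and
 * "<arena>_128".  Requests that round up to at most 128 bytes are then
 * served from those caches rather than from the segment list.
 */
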
/** rtl_arena_deactivate()
 */
static void
rtl_arena_deactivate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type * head, * segment;

    /* remove from arena list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
    QUEUE_REMOVE_NAMED(arena, arena_);
    RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));

    /* cleanup quantum cache(s) */
    if ((arena->m_qcache_max > 0) && (arena->m_qcache_ptr != 0))
    {
        int i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
        for (i = 1; i <= n; i++)
        {
            if (arena->m_qcache_ptr[i - 1] != 0)
            {
                rtl_cache_destroy (arena->m_qcache_ptr[i - 1]);
                arena->m_qcache_ptr[i - 1] = 0;
            }
        }
        rtl_arena_free (
            gp_arena_arena,
            arena->m_qcache_ptr,
            n * sizeof(rtl_cache_type*));

        arena->m_qcache_ptr = 0;
    }

    /* check for leaked segments */
    // SAL_INFO(
    //  "sal.rtl",
    //  "rtl_arena_deactivate(" << arena->m_name << "): allocs: "
    //  << arena->m_stats.m_alloc << ", frees: " << arena->m_stats.m_free
    //  << "; total: " << arena->m_stats.m_mem_total << ", used: "
    //  << arena->m_stats.m_mem_alloc);
    if (arena->m_stats.m_alloc > arena->m_stats.m_free)
    {
        sal_Size i, n;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_arena_deactivate(" << arena->m_name << "): cleaning up "
        //  << (arena->m_stats.m_alloc - arena->m_stats.m_free)
        //  << " leaked segment(s) [" << arena->m_stats.m_mem_alloc

        /* cleanup still used segment(s) */
        for (i = 0, n = arena->m_hash_size; i < n; i++)
        {
            while ((segment = arena->m_hash_table[i]) != 0)
            {
                /* pop from hash table */
                arena->m_hash_table[i] = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }
        }
    }

    /* cleanup hash table */
    if (arena->m_hash_table != arena->m_hash_table_0)
    {
        rtl_arena_free (
            gp_arena_arena,
            arena->m_hash_table,
            arena->m_hash_size * sizeof(rtl_arena_segment_type*));

        arena->m_hash_table = arena->m_hash_table_0;
        arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
    }

    /* cleanup segment list */
    head = &(arena->m_segment_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        if (segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
        {
            /* remove from freelist */
            rtl_arena_freelist_remove (arena, segment);
        }
        else
        {
            /* can have only free and span segments here */
            assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
        }

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &segment);
    }

    /* cleanup segment reserve list */
    head = &(arena->m_segment_reserve_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);
    }

    /* cleanup segment reserve span(s) */
    head = &(arena->m_segment_reserve_span_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* can have only span segments here */
        assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* return span to g_machdep_arena */
        rtl_machdep_free (gp_machdep_arena, (void*)(segment->m_addr), segment->m_size);
    }
}

/* ================================================================= *
 *
 * arena implementation.
 *
 * ================================================================= */

/** rtl_arena_create()
 */
rtl_arena_type *
SAL_CALL rtl_arena_create (
    const char *       name,
    sal_Size           quantum,
    sal_Size           quantum_cache_max,
    rtl_arena_type *   source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size),
    SAL_UNUSED_PARAMETER int
) SAL_THROW_EXTERN_C()
{
    rtl_arena_type * result = 0;
    sal_Size         size   = sizeof(rtl_arena_type);

try_alloc:
    result = (rtl_arena_type*)rtl_arena_alloc (gp_arena_arena, &size);
    if (result != 0)
    {
        rtl_arena_type * arena = result;
        rtl_arena_constructor (arena);

        if (!source_arena)
        {
            assert(gp_default_arena != 0);
            source_arena = gp_default_arena;
        }

        result = rtl_arena_activate (
            arena,
            name,
            quantum,
            quantum_cache_max,
            source_arena,
            source_alloc,
            source_free
        );

        if (result == 0)
        {
            rtl_arena_deactivate (arena);
            rtl_arena_destructor (arena);
            rtl_arena_free (gp_arena_arena, arena, size);
        }
    }
    else if (gp_arena_arena == 0)
    {
        ensureArenaSingleton();
        if (gp_arena_arena)
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}

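/*
 * Usage sketch for the public entry points defined in this file.  The
 * arena name, quantum and sizes are arbitrary example values, error
 * handling is omitted, and the block is intentionally not compiled.
 */
#if 0
static void rtl_arena_usage_sketch()
{
    /* create an arena sourcing its spans from gp_default_arena */
    rtl_arena_type * arena = rtl_arena_create (
        "example_arena",
        8,               /* quantum; rounded up to a power of two */
        0,               /* no quantum caching */
        0,               /* source arena: 0 selects gp_default_arena */
        rtl_arena_alloc,
        rtl_arena_free,
        0);

    /* allocate; *pSize is rounded up and updated to the actual size */
    sal_Size size = 1000;
    void *   p    = rtl_arena_alloc (arena, &size);

    /* ... use p ... */

    /* free with the size reported by rtl_arena_alloc(), then tear down */
    rtl_arena_free (arena, p, size);
    rtl_arena_destroy (arena);
}
#endif
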
/** rtl_arena_destroy()
 */
void
SAL_CALL rtl_arena_destroy (
    rtl_arena_type * arena
) SAL_THROW_EXTERN_C()
{
    if (arena != 0)
    {
        rtl_arena_deactivate (arena);
        rtl_arena_destructor (arena);
        rtl_arena_free (gp_arena_arena, arena, sizeof(rtl_arena_type));
    }
}

/** rtl_arena_alloc()
 */
void *
SAL_CALL rtl_arena_alloc (
    rtl_arena_type * arena,
    sal_Size *       pSize
) SAL_THROW_EXTERN_C()
{
    void * addr = 0;

    if ((arena != 0) && (pSize != 0))
    {
        sal_Size size;

        if (alloc_mode == AMode_SYSTEM)
            return rtl_allocateMemory(*pSize);

        size = RTL_MEMORY_ALIGN((*pSize), arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* allocate from segment list */
            rtl_arena_segment_type *segment = 0;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
            if (rtl_arena_segment_alloc (arena, size, &segment) ||
                rtl_arena_segment_create(arena, size, &segment)    )
            {
                /* shrink to fit */
                sal_Size oversize;

                /* mark segment used */
                assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE);
                segment->m_type = RTL_ARENA_SEGMENT_TYPE_USED;

                /* resize */
                assert(segment->m_size >= size);
                oversize = segment->m_size - size;
                if ((oversize >= arena->m_quantum) && (oversize >= arena->m_qcache_max))
                {
                    rtl_arena_segment_type * remainder = 0;
                    rtl_arena_segment_get (arena, &remainder);
                    if (remainder != 0)
                    {
                        segment->m_size = size;

                        remainder->m_addr = segment->m_addr + segment->m_size;
                        remainder->m_size = oversize;
                        remainder->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                        QUEUE_INSERT_HEAD_NAMED(segment, remainder, s);

                        rtl_arena_freelist_insert (arena, remainder);
                    }
                }

                rtl_arena_hash_insert (arena, segment);

                (*pSize) = segment->m_size;
                addr = (void*)(segment->m_addr);
            }
            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* allocate from quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            assert(arena->m_qcache_ptr[index] != 0);

            addr = rtl_cache_alloc (arena->m_qcache_ptr[index]);
            if (addr != 0)
                (*pSize) = size;
        }
    }
    return (addr);
}

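/*
 * Note on the contract above, stated informally: *pSize is an in/out
 * parameter.  For example (made-up numbers), with a quantum of 32 a
 * request of *pSize == 40 is aligned to 64; the caller gets back a block
 * of at least 64 bytes and *pSize == 64 (or the full segment size when
 * the remainder was too small to split), and that returned size is what
 * should later be passed to rtl_arena_free().
 */
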
/** rtl_arena_free()
 */
void
SAL_CALL rtl_arena_free (
    rtl_arena_type * arena,
    void *           addr,
    sal_Size         size
) SAL_THROW_EXTERN_C()
{
    if (arena != 0)
    {
        if (alloc_mode == AMode_SYSTEM)
        {
            rtl_freeMemory(addr);
            return;
        }

        size = RTL_MEMORY_ALIGN(size, arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* free to segment list */
            rtl_arena_segment_type * segment;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

            segment = rtl_arena_hash_remove (arena, (sal_uIntPtr)(addr), size);
            if (segment != 0)
            {
                rtl_arena_segment_type *next, *prev;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* determine (new) next and prev segment */
                next = segment->m_snext, prev = segment->m_sprev;

                /* entire span free when prev is a span, and next is either a span or a list head */
                if (((prev->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)) &&
                    ((next->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)  ||
                     (next->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD))    )
                {
                    assert(
                        prev->m_addr == segment->m_addr
                        && prev->m_size == segment->m_size);

                    if (arena->m_source_free)
                    {
                        addr = (void*)(prev->m_addr);
                        size = prev->m_size;

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(segment, s);

                        /* release segment descriptor */
                        rtl_arena_segment_put (arena, &segment);

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(prev, s);

                        /* release (span) segment descriptor */
                        rtl_arena_segment_put (arena, &prev);

                        /* update stats, return span to source arena */
                        arena->m_stats.m_mem_total -= size;
                        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                        (arena->m_source_free)(arena->m_source_arena, addr, size);
                        return;
                    }
                }

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }

            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* free to quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            assert(arena->m_qcache_ptr[index] != 0);

            rtl_cache_free (arena->m_qcache_ptr[index], addr);
        }
    }
}

/* ================================================================= *
 *
 * machdep internals.
 *
 * ================================================================= */

#if defined(SAL_UNX)
#include <sys/mman.h>
#elif defined(SAL_W32)
#define MAP_FAILED 0
#endif /* SAL_UNX || SAL_W32 */

/** rtl_machdep_alloc()
 */
static void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size *       pSize
)
{
    void *   addr;
    sal_Size size = (*pSize);

    assert(pArena == gp_machdep_arena);

#if defined(SOLARIS) && defined(SPARC)
    /* see @ mmap(2) man pages */
    size += (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
    if (size > (4 << 20))
        size = RTL_MEMORY_P2ROUNDUP(size, (4 << 20));
    else if (size > (512 << 10))
        size = RTL_MEMORY_P2ROUNDUP(size, (512 << 10));
    else
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    size -= (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
#else
    /* default allocation granularity */
    if (pArena->m_quantum < (64 << 10))
    {
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    }
    else
    {
        size = RTL_MEMORY_P2ROUNDUP(size, pArena->m_quantum);
    }
#endif

#if defined(SAL_UNX)
    addr = mmap (NULL, (size_t)(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#elif defined(SAL_W32)
    addr = VirtualAlloc (NULL, (SIZE_T)(size), MEM_COMMIT, PAGE_READWRITE);
#endif /* (SAL_UNX || SAL_W32) */

    if (addr != MAP_FAILED)
    {
        pArena->m_stats.m_alloc += 1;
        pArena->m_stats.m_mem_total += size;
        pArena->m_stats.m_mem_alloc += size;

        (*pSize) = size;
        return (addr);
    }
    return (NULL);
}

/** rtl_machdep_free()
 */
static void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void *           pAddr,
    sal_Size         nSize
)
{
    assert(pArena == gp_machdep_arena);

    pArena->m_stats.m_free += 1;
    pArena->m_stats.m_mem_total -= nSize;
    pArena->m_stats.m_mem_alloc -= nSize;

#if defined(SAL_UNX)
    (void) munmap(pAddr, nSize);
#elif defined(SAL_W32)
    (void) VirtualFree ((LPVOID)(pAddr), (SIZE_T)(0), MEM_RELEASE);
#endif /* (SAL_UNX || SAL_W32) */
}

/** rtl_machdep_pagesize()
 */
static sal_Size
rtl_machdep_pagesize()
{
#if defined(SAL_UNX)
#if defined(FREEBSD) || defined(NETBSD) || defined(DRAGONFLY)
    return ((sal_Size)getpagesize());
#else
    return ((sal_Size)sysconf(_SC_PAGESIZE));
#endif /* xBSD || POSIX */
#elif defined(SAL_W32)
    SYSTEM_INFO info;
    GetSystemInfo (&info);
    return ((sal_Size)(info.dwPageSize));
#endif /* (SAL_UNX || SAL_W32) */
}

/* ================================================================= *
 *
 * arena initialization.
 *
 * ================================================================= */

void
rtl_arena_init()
{
    {
        /* list of arenas */
        RTL_MEMORY_LOCK_INIT(&(g_arena_list.m_lock));
        rtl_arena_constructor (&(g_arena_list.m_arena_head));
    }
    {
        /* machdep (pseudo) arena */
        static rtl_arena_type g_machdep_arena;

        assert(gp_machdep_arena == 0);
        rtl_arena_constructor (&g_machdep_arena);

        gp_machdep_arena = rtl_arena_activate (
            &g_machdep_arena,
            "rtl_machdep_arena",
            rtl_machdep_pagesize(),
            0,       /* no quantum caching */
            0, 0, 0  /* no source */
        );
        assert(gp_machdep_arena != 0);
    }
    {
        /* default arena */
        static rtl_arena_type g_default_arena;

        assert(gp_default_arena == 0);
        rtl_arena_constructor (&g_default_arena);

        gp_default_arena = rtl_arena_activate (
            &g_default_arena,
            "rtl_default_arena",
            rtl_machdep_pagesize(),
            0,                 /* no quantum caching */
            gp_machdep_arena,  /* source */
            rtl_machdep_alloc,
            rtl_machdep_free
        );
        assert(gp_default_arena != 0);
    }
    {
        /* arena internal arena */
        static rtl_arena_type g_arena_arena;

        assert(gp_arena_arena == 0);
        rtl_arena_constructor (&g_arena_arena);

        gp_arena_arena = rtl_arena_activate (
            &g_arena_arena,
            "rtl_arena_internal_arena",
            64,                /* quantum */
            0,                 /* no quantum caching */
            gp_default_arena,  /* source */
            rtl_arena_alloc,
            rtl_arena_free
        );
        assert(gp_arena_arena != 0);
    }
    // SAL_INFO("sal.rtl", "rtl_arena_init completed");
}

/* ================================================================= */

void
rtl_arena_fini()
{
    if (gp_arena_arena != 0)
    {
        rtl_arena_type * arena, * head;

        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        head = &(g_arena_list.m_arena_head);

        for (arena = head->m_arena_next; arena != head; arena = arena->m_arena_next)
        {
            // SAL_INFO(
            //  "sal.rtl",
            //  "rtl_arena_fini(" << arena->m_name << "): allocs: "
            //  << arena->m_stats.m_alloc << ", frees: "
            //  << arena->m_stats.m_free << "; total: "
            //  << arena->m_stats.m_mem_total << ", used: "
            //  << arena->m_stats.m_mem_alloc);
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_arena_fini completed");
}

/* ================================================================= */

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */