/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */
20 #include "alloc_arena.hxx"
22 #include "alloc_impl.hxx"
23 #include "internal/rtllifecycle.h"
24 #include "sal/macros.h"
25 #include "osl/diagnose.h"
31 /* ================================================================= *
35 * ================================================================= */
40 struct rtl_arena_list_st
42 rtl_memory_lock_type m_lock
;
43 rtl_arena_type m_arena_head
;
46 static rtl_arena_list_st g_arena_list
;
49 * provided for arena_type allocations, and hash_table resizing.
53 static rtl_arena_type
* gp_arena_arena
= 0;
57 * Low level virtual memory (pseudo) arena
58 * (platform dependent implementation)
62 static rtl_arena_type
* gp_machdep_arena
= 0;
66 rtl_arena_type
* gp_default_arena
= 0;
72 SAL_CALL
rtl_machdep_alloc (
73 rtl_arena_type
* pArena
,
78 SAL_CALL
rtl_machdep_free (
79 rtl_arena_type
* pArena
,
85 rtl_machdep_pagesize();
87 /* ================================================================= */
89 /** rtl_arena_segment_constructor()
92 rtl_arena_segment_constructor (void * obj
)
94 rtl_arena_segment_type
* segment
= static_cast<rtl_arena_segment_type
*>(obj
);
96 QUEUE_START_NAMED(segment
, s
);
97 QUEUE_START_NAMED(segment
, f
);
102 /** rtl_arena_segment_destructor()
105 rtl_arena_segment_destructor (void * obj
)
107 rtl_arena_segment_type
* segment
= static_cast< rtl_arena_segment_type
* >(
109 assert(QUEUE_STARTED_NAMED(segment
, s
));
110 assert(QUEUE_STARTED_NAMED(segment
, f
));
111 (void) segment
; // avoid warnings
114 /* ================================================================= */
116 /** rtl_arena_segment_populate()
118 * @precond arena->m_lock acquired.
121 rtl_arena_segment_populate (
122 rtl_arena_type
* arena
125 rtl_arena_segment_type
*span
;
126 sal_Size size
= rtl_machdep_pagesize();
128 span
= static_cast< rtl_arena_segment_type
* >(
129 rtl_machdep_alloc(gp_machdep_arena
, &size
));
132 rtl_arena_segment_type
*first
, *last
, *head
;
133 sal_Size count
= size
/ sizeof(rtl_arena_segment_type
);
135 /* insert onto reserve span list */
136 QUEUE_INSERT_TAIL_NAMED(&(arena
->m_segment_reserve_span_head
), span
, s
);
137 QUEUE_START_NAMED(span
, f
);
138 span
->m_addr
= reinterpret_cast<sal_uIntPtr
>(span
);
140 span
->m_type
= RTL_ARENA_SEGMENT_TYPE_SPAN
;
142 /* insert onto reserve list */
143 head
= &(arena
->m_segment_reserve_head
);
144 for (first
= span
+ 1, last
= span
+ count
; first
< last
; ++first
)
146 QUEUE_INSERT_TAIL_NAMED(head
, first
, s
);
147 QUEUE_START_NAMED(first
, f
);
156 /** rtl_arena_segment_get()
158 * @precond arena->m_lock acquired.
159 * @precond (*ppSegment == 0)
162 rtl_arena_segment_get (
163 rtl_arena_type
* arena
,
164 rtl_arena_segment_type
** ppSegment
167 rtl_arena_segment_type
* head
;
169 assert(*ppSegment
== 0);
171 head
= &(arena
->m_segment_reserve_head
);
172 if ((head
->m_snext
!= head
) || rtl_arena_segment_populate (arena
))
174 (*ppSegment
) = head
->m_snext
;
175 QUEUE_REMOVE_NAMED((*ppSegment
), s
);
179 /** rtl_arena_segment_put()
181 * @precond arena->m_lock acquired.
182 * @postcond (*ppSegment == 0)
185 rtl_arena_segment_put (
186 rtl_arena_type
* arena
,
187 rtl_arena_segment_type
** ppSegment
190 rtl_arena_segment_type
* head
;
192 assert(QUEUE_STARTED_NAMED((*ppSegment
), s
));
193 assert(QUEUE_STARTED_NAMED((*ppSegment
), f
));
195 (*ppSegment
)->m_addr
= 0;
196 (*ppSegment
)->m_size
= 0;
198 assert((*ppSegment
)->m_type
!= RTL_ARENA_SEGMENT_TYPE_HEAD
);
199 (*ppSegment
)->m_type
= 0;
201 /* keep as reserve */
202 head
= &(arena
->m_segment_reserve_head
);
203 QUEUE_INSERT_HEAD_NAMED(head
, (*ppSegment
), s
);
209 /** rtl_arena_freelist_insert()
211 * @precond arena->m_lock acquired.
214 rtl_arena_freelist_insert (
215 rtl_arena_type
* arena
,
216 rtl_arena_segment_type
* segment
219 rtl_arena_segment_type
* head
;
221 head
= &(arena
->m_freelist_head
[highbit(segment
->m_size
) - 1]);
222 QUEUE_INSERT_TAIL_NAMED(head
, segment
, f
);
224 arena
->m_freelist_bitmap
|= head
->m_size
;
227 /** rtl_arena_freelist_remove()
229 * @precond arena->m_lock acquired.
232 rtl_arena_freelist_remove (
233 rtl_arena_type
* arena
,
234 rtl_arena_segment_type
* segment
237 if ((segment
->m_fnext
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
) &&
238 (segment
->m_fprev
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
) )
240 rtl_arena_segment_type
* head
;
242 head
= segment
->m_fprev
;
243 assert(arena
->m_freelist_bitmap
& head
->m_size
);
244 arena
->m_freelist_bitmap
^= head
->m_size
;
246 QUEUE_REMOVE_NAMED(segment
, f
);
249 /* ================================================================= */
/** RTL_ARENA_HASH_INDEX()
 *
 *  Fold a segment address into a bucket index of the arena's hash table:
 *  mix the address with two right-shifted copies of itself, discard the
 *  quantum-aligned low bits, and mask to the (power-of-two) table size.
 */
#define RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
     ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_ARENA_HASH_INDEX(arena, addr) \
    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))
259 /** rtl_arena_hash_rescale()
261 * @precond arena->m_lock released.
264 rtl_arena_hash_rescale (
265 rtl_arena_type
* arena
,
269 assert(new_size
!= 0);
271 rtl_arena_segment_type
** new_table
;
274 new_bytes
= new_size
* sizeof(rtl_arena_segment_type
*);
275 new_table
= static_cast<rtl_arena_segment_type
**>(rtl_arena_alloc (gp_arena_arena
, &new_bytes
));
279 rtl_arena_segment_type
** old_table
;
280 sal_Size old_size
, i
;
282 memset (new_table
, 0, new_bytes
);
284 RTL_MEMORY_LOCK_ACQUIRE(&(arena
->m_lock
));
286 old_table
= arena
->m_hash_table
;
287 old_size
= arena
->m_hash_size
;
291 // "rtl_arena_hash_rescale(" << arena->m_name << "): nseg: "
292 // << (arena->m_stats.m_alloc - arena->m_stats.m_free) << " (ave: "
293 // << ((arena->m_stats.m_alloc - arena->m_stats.m_free)
294 // >> arena->m_hash_shift)
295 // << "), frees: " << arena->m_stats.m_free << " [old_size: "
296 // << old_size << ", new_size: " << new_size << ']');
298 arena
->m_hash_table
= new_table
;
299 arena
->m_hash_size
= new_size
;
300 arena
->m_hash_shift
= highbit(arena
->m_hash_size
) - 1;
302 for (i
= 0; i
< old_size
; i
++)
304 rtl_arena_segment_type
* curr
= old_table
[i
];
307 rtl_arena_segment_type
* next
= curr
->m_fnext
;
308 rtl_arena_segment_type
** head
;
310 // coverity[negative_shift]
311 head
= &(arena
->m_hash_table
[RTL_ARENA_HASH_INDEX(arena
, curr
->m_addr
)]);
312 curr
->m_fnext
= (*head
);
320 RTL_MEMORY_LOCK_RELEASE(&(arena
->m_lock
));
322 if (old_table
!= arena
->m_hash_table_0
)
324 sal_Size old_bytes
= old_size
* sizeof(rtl_arena_segment_type
*);
325 rtl_arena_free (gp_arena_arena
, old_table
, old_bytes
);
330 /** rtl_arena_hash_insert()
331 * ...and update stats.
334 rtl_arena_hash_insert (
335 rtl_arena_type
* arena
,
336 rtl_arena_segment_type
* segment
339 rtl_arena_segment_type
** ppSegment
;
341 ppSegment
= &(arena
->m_hash_table
[RTL_ARENA_HASH_INDEX(arena
, segment
->m_addr
)]);
343 segment
->m_fnext
= (*ppSegment
);
344 (*ppSegment
) = segment
;
346 arena
->m_stats
.m_alloc
+= 1;
347 arena
->m_stats
.m_mem_alloc
+= segment
->m_size
;
350 /** rtl_arena_hash_remove()
351 * ...and update stats.
353 rtl_arena_segment_type
*
354 rtl_arena_hash_remove (
355 rtl_arena_type
* arena
,
360 rtl_arena_segment_type
*segment
, **segpp
;
361 sal_Size lookups
= 0;
363 segpp
= &(arena
->m_hash_table
[RTL_ARENA_HASH_INDEX(arena
, addr
)]);
364 while ((segment
= *segpp
) != 0)
366 if (segment
->m_addr
== addr
)
368 *segpp
= segment
->m_fnext
, segment
->m_fnext
= segment
->m_fprev
= segment
;
372 /* update lookup miss stats */
374 segpp
= &(segment
->m_fnext
);
377 assert(segment
!= 0); // bad free
380 assert(segment
->m_size
== size
);
381 (void) size
; // avoid warnings
383 arena
->m_stats
.m_free
+= 1;
384 arena
->m_stats
.m_mem_alloc
-= segment
->m_size
;
388 sal_Size nseg
= (sal_Size
)(arena
->m_stats
.m_alloc
- arena
->m_stats
.m_free
);
389 if (nseg
> 4 * arena
->m_hash_size
)
391 if (!(arena
->m_flags
& RTL_ARENA_FLAG_RESCALE
))
393 sal_Size ave
= nseg
>> arena
->m_hash_shift
;
395 sal_Size new_size
= arena
->m_hash_size
<< (highbit(ave
) - 1);
397 arena
->m_flags
|= RTL_ARENA_FLAG_RESCALE
;
398 RTL_MEMORY_LOCK_RELEASE(&(arena
->m_lock
));
399 rtl_arena_hash_rescale (arena
, new_size
);
400 RTL_MEMORY_LOCK_ACQUIRE(&(arena
->m_lock
));
401 arena
->m_flags
&= ~RTL_ARENA_FLAG_RESCALE
;
410 /* ================================================================= */
412 /** rtl_arena_segment_alloc()
413 * allocate (and remove) segment from freelist
415 * @precond arena->m_lock acquired
416 * @precond (*ppSegment == 0)
419 rtl_arena_segment_alloc (
420 rtl_arena_type
* arena
,
422 rtl_arena_segment_type
** ppSegment
427 assert(*ppSegment
== 0);
428 if (!RTL_MEMORY_ISP2(size
))
430 int msb
= highbit(size
);
431 if (RTL_ARENA_FREELIST_SIZE
== sal::static_int_cast
< size_t >(msb
))
433 /* highest possible freelist: fall back to first fit */
434 rtl_arena_segment_type
*head
, *segment
;
436 head
= &(arena
->m_freelist_head
[msb
- 1]);
437 for (segment
= head
->m_fnext
; segment
!= head
; segment
= segment
->m_fnext
)
439 if (segment
->m_size
>= size
)
441 /* allocate first fit segment */
442 (*ppSegment
) = segment
;
446 goto dequeue_and_leave
;
449 /* roundup to next power of 2 */
450 size
= (((sal_Size
)1) << msb
);
453 index
= lowbit(RTL_MEMORY_P2ALIGN(arena
->m_freelist_bitmap
, size
));
456 /* instant fit: allocate first free segment */
457 rtl_arena_segment_type
*head
;
459 head
= &(arena
->m_freelist_head
[index
- 1]);
460 (*ppSegment
) = head
->m_fnext
;
461 assert((*ppSegment
) != head
);
467 /* remove from freelist */
468 rtl_arena_freelist_remove (arena
, (*ppSegment
));
470 return (*ppSegment
!= 0);
473 /** rtl_arena_segment_create()
474 * import new (span) segment from source arena
476 * @precond arena->m_lock acquired
477 * @precond (*ppSegment == 0)
480 rtl_arena_segment_create (
481 rtl_arena_type
* arena
,
483 rtl_arena_segment_type
** ppSegment
486 assert((*ppSegment
) == 0);
487 if (arena
->m_source_alloc
!= 0)
489 rtl_arena_segment_get (arena
, ppSegment
);
492 rtl_arena_segment_type
* span
= 0;
493 rtl_arena_segment_get (arena
, &span
);
496 /* import new span from source arena */
497 RTL_MEMORY_LOCK_RELEASE(&(arena
->m_lock
));
500 span
->m_addr
= reinterpret_cast<sal_uIntPtr
>(
501 (arena
->m_source_alloc
)(
502 arena
->m_source_arena
, &(span
->m_size
)));
504 RTL_MEMORY_LOCK_ACQUIRE(&(arena
->m_lock
));
505 if (span
->m_addr
!= 0)
507 /* insert onto segment list, update stats */
508 span
->m_type
= RTL_ARENA_SEGMENT_TYPE_SPAN
;
509 QUEUE_INSERT_HEAD_NAMED(&(arena
->m_segment_head
), span
, s
);
510 arena
->m_stats
.m_mem_total
+= span
->m_size
;
512 (*ppSegment
)->m_addr
= span
->m_addr
;
513 (*ppSegment
)->m_size
= span
->m_size
;
514 (*ppSegment
)->m_type
= RTL_ARENA_SEGMENT_TYPE_FREE
;
515 QUEUE_INSERT_HEAD_NAMED(span
, (*ppSegment
), s
);
520 rtl_arena_segment_put (arena
, &span
);
522 rtl_arena_segment_put (arena
, ppSegment
);
528 /** rtl_arena_segment_coalesce()
529 * mark as free and join with adjacent free segment(s)
531 * @precond arena->m_lock acquired
532 * @precond segment marked 'used'
535 rtl_arena_segment_coalesce (
536 rtl_arena_type
* arena
,
537 rtl_arena_segment_type
* segment
540 rtl_arena_segment_type
*next
, *prev
;
542 /* mark segment free */
543 assert(segment
->m_type
== RTL_ARENA_SEGMENT_TYPE_USED
);
544 segment
->m_type
= RTL_ARENA_SEGMENT_TYPE_FREE
;
546 /* try to merge w/ next segment */
547 next
= segment
->m_snext
;
548 if (next
->m_type
== RTL_ARENA_SEGMENT_TYPE_FREE
)
550 assert(segment
->m_addr
+ segment
->m_size
== next
->m_addr
);
551 segment
->m_size
+= next
->m_size
;
553 /* remove from freelist */
554 rtl_arena_freelist_remove (arena
, next
);
556 /* remove from segment list */
557 QUEUE_REMOVE_NAMED(next
, s
);
559 /* release segment descriptor */
560 rtl_arena_segment_put (arena
, &next
);
563 /* try to merge w/ prev segment */
564 prev
= segment
->m_sprev
;
565 if (prev
->m_type
== RTL_ARENA_SEGMENT_TYPE_FREE
)
567 assert(prev
->m_addr
+ prev
->m_size
== segment
->m_addr
);
568 segment
->m_addr
= prev
->m_addr
;
569 segment
->m_size
+= prev
->m_size
;
571 /* remove from freelist */
572 rtl_arena_freelist_remove (arena
, prev
);
574 /* remove from segment list */
575 QUEUE_REMOVE_NAMED(prev
, s
);
577 /* release segment descriptor */
578 rtl_arena_segment_put (arena
, &prev
);
582 /* ================================================================= */
584 /** rtl_arena_constructor()
587 rtl_arena_constructor (void * obj
)
589 rtl_arena_type
* arena
= static_cast<rtl_arena_type
*>(obj
);
590 rtl_arena_segment_type
* head
;
593 memset (arena
, 0, sizeof(rtl_arena_type
));
595 QUEUE_START_NAMED(arena
, arena_
);
597 (void) RTL_MEMORY_LOCK_INIT(&(arena
->m_lock
));
599 head
= &(arena
->m_segment_reserve_span_head
);
600 rtl_arena_segment_constructor (head
);
601 head
->m_type
= RTL_ARENA_SEGMENT_TYPE_HEAD
;
603 head
= &(arena
->m_segment_reserve_head
);
604 rtl_arena_segment_constructor (head
);
605 head
->m_type
= RTL_ARENA_SEGMENT_TYPE_HEAD
;
607 head
= &(arena
->m_segment_head
);
608 rtl_arena_segment_constructor (head
);
609 head
->m_type
= RTL_ARENA_SEGMENT_TYPE_HEAD
;
611 for (i
= 0; i
< RTL_ARENA_FREELIST_SIZE
; i
++)
613 head
= &(arena
->m_freelist_head
[i
]);
614 rtl_arena_segment_constructor (head
);
616 head
->m_size
= (((sal_Size
)1) << i
);
617 head
->m_type
= RTL_ARENA_SEGMENT_TYPE_HEAD
;
620 arena
->m_hash_table
= arena
->m_hash_table_0
;
621 arena
->m_hash_size
= RTL_ARENA_HASH_SIZE
;
622 arena
->m_hash_shift
= highbit(arena
->m_hash_size
) - 1;
625 /** rtl_arena_destructor()
628 rtl_arena_destructor (void * obj
)
630 rtl_arena_type
* arena
= static_cast<rtl_arena_type
*>(obj
);
631 rtl_arena_segment_type
* head
;
634 assert(QUEUE_STARTED_NAMED(arena
, arena_
));
636 RTL_MEMORY_LOCK_DESTROY(&(arena
->m_lock
));
638 head
= &(arena
->m_segment_reserve_span_head
);
639 assert(head
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
);
640 rtl_arena_segment_destructor (head
);
642 head
= &(arena
->m_segment_reserve_head
);
643 assert(head
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
);
644 rtl_arena_segment_destructor (head
);
646 head
= &(arena
->m_segment_head
);
647 assert(head
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
);
648 rtl_arena_segment_destructor (head
);
650 for (i
= 0; i
< RTL_ARENA_FREELIST_SIZE
; i
++)
652 head
= &(arena
->m_freelist_head
[i
]);
654 assert(head
->m_size
== (((sal_Size
)1) << i
));
655 assert(head
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
);
657 rtl_arena_segment_destructor (head
);
660 assert(arena
->m_hash_table
== arena
->m_hash_table_0
);
661 assert(arena
->m_hash_size
== RTL_ARENA_HASH_SIZE
);
663 arena
->m_hash_shift
==
664 sal::static_int_cast
< unsigned >(highbit(arena
->m_hash_size
) - 1));
667 /* ================================================================= */
669 /** rtl_arena_activate()
673 rtl_arena_type
* arena
,
676 sal_Size quantum_cache_max
,
677 rtl_arena_type
* source_arena
,
678 void * (SAL_CALL
* source_alloc
)(rtl_arena_type
*, sal_Size
*),
679 void (SAL_CALL
* source_free
) (rtl_arena_type
*, void *, sal_Size
)
685 (void) snprintf (arena
->m_name
, sizeof(arena
->m_name
), "%s", name
);
687 if (!RTL_MEMORY_ISP2(quantum
))
689 /* roundup to next power of 2 */
690 quantum
= (((sal_Size
)1) << highbit(quantum
));
692 quantum_cache_max
= RTL_MEMORY_P2ROUNDUP(quantum_cache_max
, quantum
);
694 arena
->m_quantum
= quantum
;
695 arena
->m_quantum_shift
= highbit(arena
->m_quantum
) - 1;
696 arena
->m_qcache_max
= quantum_cache_max
;
698 arena
->m_source_arena
= source_arena
;
699 arena
->m_source_alloc
= source_alloc
;
700 arena
->m_source_free
= source_free
;
702 if (arena
->m_qcache_max
> 0)
704 char namebuf
[RTL_ARENA_NAME_LENGTH
+ 1];
705 int i
, n
= (arena
->m_qcache_max
>> arena
->m_quantum_shift
);
707 sal_Size size
= n
* sizeof(rtl_cache_type
*);
708 arena
->m_qcache_ptr
= static_cast<rtl_cache_type
**>(rtl_arena_alloc (gp_arena_arena
, &size
));
709 if (!(arena
->m_qcache_ptr
))
714 for (i
= 1; i
<= n
; i
++)
716 size
= i
* arena
->m_quantum
;
717 (void) snprintf (namebuf
, sizeof(namebuf
), "%s_%" SAL_PRIuUINTPTR
, arena
->m_name
, size
);
718 arena
->m_qcache_ptr
[i
- 1] = rtl_cache_create(namebuf
, size
, 0, NULL
, NULL
, NULL
, NULL
, arena
, RTL_CACHE_FLAG_QUANTUMCACHE
);
722 /* insert into arena list */
723 RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list
.m_lock
));
724 QUEUE_INSERT_TAIL_NAMED(&(g_arena_list
.m_arena_head
), arena
, arena_
);
725 RTL_MEMORY_LOCK_RELEASE(&(g_arena_list
.m_lock
));
730 /** rtl_arena_deactivate()
733 rtl_arena_deactivate (
734 rtl_arena_type
* arena
737 rtl_arena_segment_type
* head
, * segment
;
739 /* remove from arena list */
740 RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list
.m_lock
));
741 QUEUE_REMOVE_NAMED(arena
, arena_
);
742 RTL_MEMORY_LOCK_RELEASE(&(g_arena_list
.m_lock
));
744 /* cleanup quantum cache(s) */
745 if ((arena
->m_qcache_max
> 0) && (arena
->m_qcache_ptr
!= 0))
747 int i
, n
= (arena
->m_qcache_max
>> arena
->m_quantum_shift
);
748 for (i
= 1; i
<= n
; i
++)
750 if (arena
->m_qcache_ptr
[i
- 1] != 0)
752 rtl_cache_destroy (arena
->m_qcache_ptr
[i
- 1]);
753 arena
->m_qcache_ptr
[i
- 1] = 0;
759 n
* sizeof(rtl_cache_type
*));
761 arena
->m_qcache_ptr
= 0;
764 /* check for leaked segments */
767 // "rtl_arena_deactivate(" << arena->m_name << "): allocs: "
768 // << arena->m_stats.m_alloc << ", frees: " << arena->m_stats.m_free
769 // << "; total: " << arena->m_stats.m_mem_total << ", used: "
770 // << arena->m_stats.m_mem_alloc);
771 if (arena
->m_stats
.m_alloc
> arena
->m_stats
.m_free
)
777 // "rtl_arena_deactivate(" << arena->m_name << "): cleaning up "
778 // << (arena->m_stats.m_alloc - arena->m_stats.m_free)
779 // << " leaked segment(s) [" << arena->m_stats.m_mem_alloc
782 /* cleanup still used segment(s) */
783 for (i
= 0, n
= arena
->m_hash_size
; i
< n
; i
++)
785 while ((segment
= arena
->m_hash_table
[i
]) != 0)
787 /* pop from hash table */
788 arena
->m_hash_table
[i
] = segment
->m_fnext
, segment
->m_fnext
= segment
->m_fprev
= segment
;
790 /* coalesce w/ adjacent free segment(s) */
791 rtl_arena_segment_coalesce (arena
, segment
);
793 /* insert onto freelist */
794 rtl_arena_freelist_insert (arena
, segment
);
799 /* cleanup hash table */
800 if (arena
->m_hash_table
!= arena
->m_hash_table_0
)
805 arena
->m_hash_size
* sizeof(rtl_arena_segment_type
*));
807 arena
->m_hash_table
= arena
->m_hash_table_0
;
808 arena
->m_hash_size
= RTL_ARENA_HASH_SIZE
;
809 arena
->m_hash_shift
= highbit(arena
->m_hash_size
) - 1;
812 /* cleanup segment list */
813 head
= &(arena
->m_segment_head
);
814 for (segment
= head
->m_snext
; segment
!= head
; segment
= head
->m_snext
)
816 if (segment
->m_type
== RTL_ARENA_SEGMENT_TYPE_FREE
)
818 /* remove from freelist */
819 rtl_arena_freelist_remove (arena
, segment
);
823 /* can have only free and span segments here */
824 assert(segment
->m_type
== RTL_ARENA_SEGMENT_TYPE_SPAN
);
827 /* remove from segment list */
828 QUEUE_REMOVE_NAMED(segment
, s
);
830 /* release segment descriptor */
831 rtl_arena_segment_put (arena
, &segment
);
834 /* cleanup segment reserve list */
835 head
= &(arena
->m_segment_reserve_head
);
836 for (segment
= head
->m_snext
; segment
!= head
; segment
= head
->m_snext
)
838 /* remove from segment list */
839 QUEUE_REMOVE_NAMED(segment
, s
);
842 /* cleanup segment reserve span(s) */
843 head
= &(arena
->m_segment_reserve_span_head
);
844 for (segment
= head
->m_snext
; segment
!= head
; segment
= head
->m_snext
)
846 /* can have only span segments here */
847 assert(segment
->m_type
== RTL_ARENA_SEGMENT_TYPE_SPAN
);
849 /* remove from segment list */
850 QUEUE_REMOVE_NAMED(segment
, s
);
852 /* return span to g_machdep_arena */
853 rtl_machdep_free (gp_machdep_arena
, reinterpret_cast<void*>(segment
->m_addr
), segment
->m_size
);
858 /* ================================================================= *
860 * arena implementation.
862 * ================================================================= */
864 /** rtl_arena_create()
867 SAL_CALL
rtl_arena_create (
870 sal_Size quantum_cache_max
,
871 rtl_arena_type
* source_arena
,
872 void * (SAL_CALL
* source_alloc
)(rtl_arena_type
*, sal_Size
*),
873 void (SAL_CALL
* source_free
) (rtl_arena_type
*, void *, sal_Size
),
874 SAL_UNUSED_PARAMETER
int
875 ) SAL_THROW_EXTERN_C()
877 rtl_arena_type
* result
= 0;
878 sal_Size size
= sizeof(rtl_arena_type
);
881 result
= static_cast<rtl_arena_type
*>(rtl_arena_alloc (gp_arena_arena
, &size
));
884 rtl_arena_type
* arena
= result
;
885 rtl_arena_constructor (arena
);
889 assert(gp_default_arena
!= 0);
890 source_arena
= gp_default_arena
;
893 result
= rtl_arena_activate (
905 rtl_arena_deactivate (arena
);
906 rtl_arena_destructor (arena
);
907 rtl_arena_free (gp_arena_arena
, arena
, size
);
910 else if (gp_arena_arena
== 0)
912 ensureArenaSingleton();
922 /** rtl_arena_destroy()
925 SAL_CALL
rtl_arena_destroy (
926 rtl_arena_type
* arena
927 ) SAL_THROW_EXTERN_C()
931 rtl_arena_deactivate (arena
);
932 rtl_arena_destructor (arena
);
933 rtl_arena_free (gp_arena_arena
, arena
, sizeof(rtl_arena_type
));
937 /** rtl_arena_alloc()
940 SAL_CALL
rtl_arena_alloc (
941 rtl_arena_type
* arena
,
943 ) SAL_THROW_EXTERN_C()
947 if ((arena
!= 0) && (pSize
!= 0))
951 size
= RTL_MEMORY_ALIGN((*pSize
), arena
->m_quantum
);
952 if (size
> arena
->m_qcache_max
)
954 /* allocate from segment list */
955 rtl_arena_segment_type
*segment
= 0;
957 RTL_MEMORY_LOCK_ACQUIRE(&(arena
->m_lock
));
958 if (rtl_arena_segment_alloc (arena
, size
, &segment
) ||
959 rtl_arena_segment_create(arena
, size
, &segment
) )
964 /* mark segment used */
965 assert(segment
->m_type
== RTL_ARENA_SEGMENT_TYPE_FREE
);
966 segment
->m_type
= RTL_ARENA_SEGMENT_TYPE_USED
;
969 assert(segment
->m_size
>= size
);
970 oversize
= segment
->m_size
- size
;
971 if ((oversize
>= arena
->m_quantum
) && (oversize
>= arena
->m_qcache_max
))
973 rtl_arena_segment_type
* remainder
= 0;
974 rtl_arena_segment_get (arena
, &remainder
);
977 segment
->m_size
= size
;
979 remainder
->m_addr
= segment
->m_addr
+ segment
->m_size
;
980 remainder
->m_size
= oversize
;
981 remainder
->m_type
= RTL_ARENA_SEGMENT_TYPE_FREE
;
982 QUEUE_INSERT_HEAD_NAMED(segment
, remainder
, s
);
984 rtl_arena_freelist_insert (arena
, remainder
);
988 rtl_arena_hash_insert (arena
, segment
);
990 (*pSize
) = segment
->m_size
;
991 addr
= reinterpret_cast<void*>(segment
->m_addr
);
993 RTL_MEMORY_LOCK_RELEASE(&(arena
->m_lock
));
997 /* allocate from quantum cache(s) */
998 int index
= (size
>> arena
->m_quantum_shift
) - 1;
999 assert(arena
->m_qcache_ptr
[index
] != 0);
1001 addr
= rtl_cache_alloc (arena
->m_qcache_ptr
[index
]);
1009 /** rtl_arena_free()
1012 SAL_CALL
rtl_arena_free (
1013 rtl_arena_type
* arena
,
1016 ) SAL_THROW_EXTERN_C()
1020 size
= RTL_MEMORY_ALIGN(size
, arena
->m_quantum
);
1021 if (size
> arena
->m_qcache_max
)
1023 /* free to segment list */
1024 rtl_arena_segment_type
* segment
;
1026 RTL_MEMORY_LOCK_ACQUIRE(&(arena
->m_lock
));
1028 segment
= rtl_arena_hash_remove (arena
, reinterpret_cast<sal_uIntPtr
>(addr
), size
);
1031 rtl_arena_segment_type
*next
, *prev
;
1033 /* coalesce w/ adjacent free segment(s) */
1034 rtl_arena_segment_coalesce (arena
, segment
);
1036 /* determine (new) next and prev segment */
1037 next
= segment
->m_snext
, prev
= segment
->m_sprev
;
1039 /* entire span free when prev is a span, and next is either a span or a list head */
1040 if (((prev
->m_type
== RTL_ARENA_SEGMENT_TYPE_SPAN
)) &&
1041 ((next
->m_type
== RTL_ARENA_SEGMENT_TYPE_SPAN
) ||
1042 (next
->m_type
== RTL_ARENA_SEGMENT_TYPE_HEAD
)) )
1045 prev
->m_addr
== segment
->m_addr
1046 && prev
->m_size
== segment
->m_size
);
1048 if (arena
->m_source_free
)
1050 addr
= reinterpret_cast<void*>(prev
->m_addr
);
1051 size
= prev
->m_size
;
1053 /* remove from segment list */
1054 QUEUE_REMOVE_NAMED(segment
, s
);
1056 /* release segment descriptor */
1057 rtl_arena_segment_put (arena
, &segment
);
1059 /* remove from segment list */
1060 QUEUE_REMOVE_NAMED(prev
, s
);
1062 /* release (span) segment descriptor */
1063 rtl_arena_segment_put (arena
, &prev
);
1065 /* update stats, return span to source arena */
1066 arena
->m_stats
.m_mem_total
-= size
;
1067 RTL_MEMORY_LOCK_RELEASE(&(arena
->m_lock
));
1069 (arena
->m_source_free
)(arena
->m_source_arena
, addr
, size
);
1074 /* insert onto freelist */
1075 rtl_arena_freelist_insert (arena
, segment
);
1078 RTL_MEMORY_LOCK_RELEASE(&(arena
->m_lock
));
1082 /* free to quantum cache(s) */
1083 int index
= (size
>> arena
->m_quantum_shift
) - 1;
1084 assert(arena
->m_qcache_ptr
[index
] != 0);
1086 rtl_cache_free (arena
->m_qcache_ptr
[index
], addr
);
1091 /* ================================================================= *
1093 * machdep internals.
1095 * ================================================================= */
1097 #if defined(SAL_UNX)
1098 #include <sys/mman.h>
1099 #elif defined(SAL_W32)
1100 #define MAP_FAILED 0
1101 #endif /* SAL_UNX || SAL_W32 */
1106 /** rtl_machdep_alloc()
1109 SAL_CALL
rtl_machdep_alloc (
1110 rtl_arena_type
* pArena
,
1115 sal_Size size
= (*pSize
);
1117 assert(pArena
== gp_machdep_arena
);
1119 #if defined(SOLARIS) && defined(SPARC)
1120 /* see @ mmap(2) man pages */
1121 size
+= (pArena
->m_quantum
+ pArena
->m_quantum
); /* "red-zone" pages */
1122 if (size
> (4 << 20))
1123 size
= RTL_MEMORY_P2ROUNDUP(size
, (4 << 20));
1124 else if (size
> (512 << 10))
1125 size
= RTL_MEMORY_P2ROUNDUP(size
, (512 << 10));
1127 size
= RTL_MEMORY_P2ROUNDUP(size
, (64 << 10));
1128 size
-= (pArena
->m_quantum
+ pArena
->m_quantum
); /* "red-zone" pages */
1130 /* default allocation granularity */
1131 if(pArena
->m_quantum
< (64 << 10))
1133 size
= RTL_MEMORY_P2ROUNDUP(size
, (64 << 10));
1137 size
= RTL_MEMORY_P2ROUNDUP(size
, pArena
->m_quantum
);
1141 #if defined(SAL_UNX)
1142 addr
= mmap (NULL
, (size_t)(size
), PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
, -1, 0);
1143 #elif defined(SAL_W32)
1144 addr
= VirtualAlloc (NULL
, (SIZE_T
)(size
), MEM_COMMIT
, PAGE_READWRITE
);
1145 #endif /* (SAL_UNX || SAL_W32) */
1147 if (addr
!= MAP_FAILED
)
1149 pArena
->m_stats
.m_alloc
+= 1;
1150 pArena
->m_stats
.m_mem_total
+= size
;
1151 pArena
->m_stats
.m_mem_alloc
+= size
;
1159 /** rtl_machdep_free()
1162 SAL_CALL
rtl_machdep_free (
1163 rtl_arena_type
* pArena
,
1168 assert(pArena
== gp_machdep_arena
);
1170 pArena
->m_stats
.m_free
+= 1;
1171 pArena
->m_stats
.m_mem_total
-= nSize
;
1172 pArena
->m_stats
.m_mem_alloc
-= nSize
;
1174 #if defined(SAL_UNX)
1175 (void) munmap(pAddr
, nSize
);
1176 #elif defined(SAL_W32)
1177 (void) VirtualFree ((LPVOID
)(pAddr
), (SIZE_T
)(0), MEM_RELEASE
);
1178 #endif /* (SAL_UNX || SAL_W32) */
1182 rtl_machdep_pagesize()
1184 #if defined(SAL_UNX)
1185 #if defined(FREEBSD) || defined(NETBSD) || defined(DRAGONFLY)
1186 return (sal_Size
)getpagesize();
1188 return (sal_Size
)sysconf(_SC_PAGESIZE
);
1189 #endif /* xBSD || POSIX */
1190 #elif defined(SAL_W32)
1192 GetSystemInfo (&info
);
1193 return (sal_Size
)info
.dwPageSize
;
1194 #endif /* (SAL_UNX || SAL_W32) */
1199 /* ================================================================= *
1201 * arena initialization.
1203 * ================================================================= */
1209 /* list of arenas */
1210 RTL_MEMORY_LOCK_INIT(&(g_arena_list
.m_lock
));
1211 rtl_arena_constructor (&(g_arena_list
.m_arena_head
));
1214 /* machdep (pseudo) arena */
1215 static rtl_arena_type g_machdep_arena
;
1217 assert(gp_machdep_arena
== 0);
1218 rtl_arena_constructor (&g_machdep_arena
);
1220 gp_machdep_arena
= rtl_arena_activate (
1222 "rtl_machdep_arena",
1223 rtl_machdep_pagesize(),
1224 0, /* no quantum caching */
1225 0, 0, 0 /* no source */
1227 assert(gp_machdep_arena
!= 0);
1231 static rtl_arena_type g_default_arena
;
1233 assert(gp_default_arena
== 0);
1234 rtl_arena_constructor (&g_default_arena
);
1236 gp_default_arena
= rtl_arena_activate (
1238 "rtl_default_arena",
1239 rtl_machdep_pagesize(),
1240 0, /* no quantum caching */
1241 gp_machdep_arena
, /* source */
1245 assert(gp_default_arena
!= 0);
1248 /* arena internal arena */
1249 static rtl_arena_type g_arena_arena
;
1251 assert(gp_arena_arena
== 0);
1252 rtl_arena_constructor (&g_arena_arena
);
1254 gp_arena_arena
= rtl_arena_activate (
1256 "rtl_arena_internal_arena",
1258 0, /* no quantum caching */
1259 gp_default_arena
, /* source */
1263 assert(gp_arena_arena
!= 0);
1265 // SAL_INFO("sal.rtl", "rtl_arena_init completed");
1268 /* ================================================================= */
1273 if (gp_arena_arena
!= 0)
1275 rtl_arena_type
* arena
, * head
;
1277 RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list
.m_lock
));
1278 head
= &(g_arena_list
.m_arena_head
);
1280 for (arena
= head
->m_arena_next
; arena
!= head
; arena
= arena
->m_arena_next
)
1284 // "rtl_arena_fini(" << arena->m_name << "): allocs: "
1285 // << arena->m_stats.m_alloc << ", frees: "
1286 // << arena->m_stats.m_free << "; total: "
1287 // << arena->m_stats.m_mem_total << ", used: "
1288 // << arena->m_stats.m_mem_alloc);
1290 RTL_MEMORY_LOCK_RELEASE(&(g_arena_list
.m_lock
));
1292 // SAL_INFO("sal.rtl", "rtl_arena_fini completed");
1295 /* ================================================================= */
1297 /* vim:set shiftwidth=4 softtabstop=4 expandtab: */