update credits
[LibreOffice.git] / sal / rtl / alloc_arena.cxx
blob329d013dfafbe5903ea1bdf799908327c670d0dc
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
2 /*
3 * This file is part of the LibreOffice project.
5 * This Source Code Form is subject to the terms of the Mozilla Public
6 * License, v. 2.0. If a copy of the MPL was not distributed with this
7 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 * This file incorporates work covered by the following license notice:
11 * Licensed to the Apache Software Foundation (ASF) under one or more
12 * contributor license agreements. See the NOTICE file distributed
13 * with this work for additional information regarding copyright
14 * ownership. The ASF licenses this file to you under the Apache
15 * License, Version 2.0 (the "License"); you may not use this file
16 * except in compliance with the License. You may obtain a copy of
17 * the License at http://www.apache.org/licenses/LICENSE-2.0 .
20 #include "alloc_arena.hxx"
22 #include "alloc_impl.hxx"
23 #include "internal/rtllifecycle.h"
24 #include "sal/macros.h"
25 #include "osl/diagnose.h"
27 #include <cassert>
28 #include <string.h>
29 #include <stdio.h>
31 /* ================================================================= *
33 * arena internals.
35 * ================================================================= */
/** g_arena_list
 *  Global registry of all active arenas, guarded by its own lock.
 *  @internal
 */
struct rtl_arena_list_st
{
    rtl_memory_lock_type m_lock;       /* protects m_arena_head linkage */
    rtl_arena_type       m_arena_head; /* list head; arenas linked via arena_ queue */
};

static rtl_arena_list_st g_arena_list;
48 /** gp_arena_arena
49 * provided for arena_type allocations, and hash_table resizing.
51 * @internal
53 static rtl_arena_type * gp_arena_arena = 0;
55 /** gp_machdep_arena
57 * Low level virtual memory (pseudo) arena
58 * (platform dependent implementation)
60 * @internal
62 static rtl_arena_type * gp_machdep_arena = 0;
64 /** gp_default_arena
66 rtl_arena_type * gp_default_arena = 0;
68 namespace
71 void *
72 SAL_CALL rtl_machdep_alloc (
73 rtl_arena_type * pArena,
74 sal_Size * pSize
77 void
78 SAL_CALL rtl_machdep_free (
79 rtl_arena_type * pArena,
80 void * pAddr,
81 sal_Size nSize
84 sal_Size
85 rtl_machdep_pagesize();
87 /* ================================================================= */
89 /** rtl_arena_segment_constructor()
91 int
92 rtl_arena_segment_constructor (void * obj)
94 rtl_arena_segment_type * segment = static_cast<rtl_arena_segment_type*>(obj);
96 QUEUE_START_NAMED(segment, s);
97 QUEUE_START_NAMED(segment, f);
99 return 1;
102 /** rtl_arena_segment_destructor()
104 void
105 rtl_arena_segment_destructor (void * obj)
107 rtl_arena_segment_type * segment = static_cast< rtl_arena_segment_type * >(
108 obj);
109 assert(QUEUE_STARTED_NAMED(segment, s));
110 assert(QUEUE_STARTED_NAMED(segment, f));
111 (void) segment; // avoid warnings
114 /* ================================================================= */
/** rtl_arena_segment_populate()
 *  Refill the arena's reserve of segment descriptors by grabbing one
 *  page from the machdep arena and carving it into descriptors.
 *  The first descriptor describes the page itself (a SPAN) so the page
 *  can be returned at teardown; the rest go onto the reserve list.
 *
 *  @precond arena->m_lock acquired.
 *  @return  true if a page was obtained, false on machdep failure.
 */
bool
rtl_arena_segment_populate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type *span;
    sal_Size size = rtl_machdep_pagesize();

    span = static_cast< rtl_arena_segment_type * >(
        rtl_machdep_alloc(gp_machdep_arena, &size));
    if (span != 0)
    {
        rtl_arena_segment_type *first, *last, *head;
        /* number of descriptors that fit into the page (incl. the span) */
        sal_Size count = size / sizeof(rtl_arena_segment_type);

        /* insert onto reserve span list */
        QUEUE_INSERT_TAIL_NAMED(&(arena->m_segment_reserve_span_head), span, s);
        QUEUE_START_NAMED(span, f);
        /* the span records the page's own address/size for later release */
        span->m_addr = reinterpret_cast<sal_uIntPtr>(span);
        span->m_size = size;
        span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;

        /* insert onto reserve list: descriptors 1..count-1 become spares */
        head = &(arena->m_segment_reserve_head);
        for (first = span + 1, last = span + count; first < last; ++first)
        {
            QUEUE_INSERT_TAIL_NAMED(head, first, s);
            QUEUE_START_NAMED(first, f);
            first->m_addr = 0;
            first->m_size = 0;
            first->m_type = 0;
        }
    }
    return (span != 0);
}
/** rtl_arena_segment_get()
 *  Pop a spare segment descriptor from the reserve list, refilling the
 *  reserve via rtl_arena_segment_populate() when it is empty.
 *  On failure *ppSegment stays 0.
 *
 *  @precond arena->m_lock acquired.
 *  @precond (*ppSegment == 0)
 */
inline void
rtl_arena_segment_get (
    rtl_arena_type * arena,
    rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    assert(*ppSegment == 0);

    head = &(arena->m_segment_reserve_head);
    /* short-circuit: populate only runs when the reserve list is empty */
    if ((head->m_snext != head) || rtl_arena_segment_populate (arena))
    {
        (*ppSegment) = head->m_snext;
        QUEUE_REMOVE_NAMED((*ppSegment), s);
    }
}
/** rtl_arena_segment_put()
 *  Return a segment descriptor to the arena's reserve list and clear
 *  the caller's pointer. The descriptor must already be unlinked from
 *  both queues.
 *
 *  @precond arena->m_lock acquired.
 *  @postcond (*ppSegment == 0)
 */
inline void
rtl_arena_segment_put (
    rtl_arena_type * arena,
    rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    /* must be unlinked from segment list and freelist */
    assert(QUEUE_STARTED_NAMED((*ppSegment), s));
    assert(QUEUE_STARTED_NAMED((*ppSegment), f));

    (*ppSegment)->m_addr = 0;
    (*ppSegment)->m_size = 0;

    /* list heads are embedded in the arena and must never be recycled */
    assert((*ppSegment)->m_type != RTL_ARENA_SEGMENT_TYPE_HEAD);
    (*ppSegment)->m_type = 0;

    /* keep as reserve */
    head = &(arena->m_segment_reserve_head);
    QUEUE_INSERT_HEAD_NAMED(head, (*ppSegment), s);

    /* clear */
    (*ppSegment) = 0;
}
209 /** rtl_arena_freelist_insert()
211 * @precond arena->m_lock acquired.
213 inline void
214 rtl_arena_freelist_insert (
215 rtl_arena_type * arena,
216 rtl_arena_segment_type * segment
219 rtl_arena_segment_type * head;
221 head = &(arena->m_freelist_head[highbit(segment->m_size) - 1]);
222 QUEUE_INSERT_TAIL_NAMED(head, segment, f);
224 arena->m_freelist_bitmap |= head->m_size;
/** rtl_arena_freelist_remove()
 *  Unlink a segment from its freelist; when it was the only entry
 *  (both neighbours are the list head) clear that size-class bit in
 *  the freelist bitmap.
 *
 *  @precond arena->m_lock acquired.
 */
inline void
rtl_arena_freelist_remove (
    rtl_arena_type * arena,
    rtl_arena_segment_type * segment
)
{
    /* last entry iff both links point at the (HEAD-typed) list head */
    if ((segment->m_fnext->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD) &&
        (segment->m_fprev->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD)    )
    {
        rtl_arena_segment_type * head;

        head = segment->m_fprev;
        assert(arena->m_freelist_bitmap & head->m_size);
        /* head->m_size is the single bit for this size class */
        arena->m_freelist_bitmap ^= head->m_size;
    }
    QUEUE_REMOVE_NAMED(segment, f);
}
249 /* ================================================================= */
/** RTL_ARENA_HASH_INDEX()
 *  Map a segment address to a bucket of the arena's hash table:
 *  mixes the address with two shifted copies of itself, drops the
 *  quantum-alignment low bits, and masks to table size (power of 2).
 */
#define RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
     ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_ARENA_HASH_INDEX(arena, addr) \
    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))
/** rtl_arena_hash_rescale()
 *  Grow the arena's hash table to new_size buckets: allocate the new
 *  table unlocked, then under the lock swap it in and rehash every
 *  chained segment. The old table is freed unless it was the embedded
 *  initial table (m_hash_table_0).
 *
 *  @precond arena->m_lock released.
 */
void
rtl_arena_hash_rescale (
    rtl_arena_type * arena,
    sal_Size new_size
)
{
    assert(new_size != 0);

    rtl_arena_segment_type ** new_table;
    sal_Size new_bytes;

    new_bytes = new_size * sizeof(rtl_arena_segment_type*);
    /* allocate before taking the lock; may recurse into gp_arena_arena */
    new_table = static_cast<rtl_arena_segment_type **>(rtl_arena_alloc (gp_arena_arena, &new_bytes));

    if (new_table != 0)
    {
        rtl_arena_segment_type ** old_table;
        sal_Size old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

        old_table = arena->m_hash_table;
        old_size  = arena->m_hash_size;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_arena_hash_rescale(" << arena->m_name << "): nseg: "
        //      << (arena->m_stats.m_alloc - arena->m_stats.m_free) << " (ave: "
        //      << ((arena->m_stats.m_alloc - arena->m_stats.m_free)
        //          >> arena->m_hash_shift)
        //      << "), frees: " << arena->m_stats.m_free << " [old_size: "
        //      << old_size << ", new_size: " << new_size << ']');

        /* install the new table first so RTL_ARENA_HASH_INDEX below
           uses the new size/shift while rehashing */
        arena->m_hash_table = new_table;
        arena->m_hash_size  = new_size;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_arena_segment_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_arena_segment_type  * next = curr->m_fnext;
                rtl_arena_segment_type ** head;

                // coverity[negative_shift]
                head = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, curr->m_addr)]);
                /* push onto new bucket chain */
                curr->m_fnext = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

        /* the embedded initial table is part of the arena; never free it */
        if (old_table != arena->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_arena_segment_type*);
            rtl_arena_free (gp_arena_arena, old_table, old_bytes);
        }
    }
}
330 /** rtl_arena_hash_insert()
331 * ...and update stats.
333 inline void
334 rtl_arena_hash_insert (
335 rtl_arena_type * arena,
336 rtl_arena_segment_type * segment
339 rtl_arena_segment_type ** ppSegment;
341 ppSegment = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, segment->m_addr)]);
343 segment->m_fnext = (*ppSegment);
344 (*ppSegment) = segment;
346 arena->m_stats.m_alloc += 1;
347 arena->m_stats.m_mem_alloc += segment->m_size;
/** rtl_arena_hash_remove()
 *  Find and unlink the segment for 'addr' from the hash table and
 *  update free statistics. When the bucket chain grows long and the
 *  table is overloaded, trigger a rescale (temporarily dropping the
 *  lock; the RESCALE flag prevents recursion).
 *
 *  @precond arena->m_lock acquired.
 *  @return  the segment, or 0 when addr was never allocated (bad free).
 */
rtl_arena_segment_type *
rtl_arena_hash_remove (
    rtl_arena_type * arena,
    sal_uIntPtr addr,
    sal_Size size
)
{
    rtl_arena_segment_type *segment, **segpp;
    sal_Size lookups = 0;

    segpp = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, addr)]);
    while ((segment = *segpp) != 0)
    {
        if (segment->m_addr == addr)
        {
            /* unlink from chain; self-link so queue macros stay valid */
            *segpp = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
            break;
        }

        /* update lookup miss stats */
        lookups += 1;
        segpp = &(segment->m_fnext);
    }

    assert(segment != 0); // bad free
    if (segment != 0)
    {
        assert(segment->m_size == size);
        (void) size; // avoid warnings

        arena->m_stats.m_free      += 1;
        arena->m_stats.m_mem_alloc -= segment->m_size;

        if (lookups > 1)
        {
            /* chain was long: consider growing the hash table */
            sal_Size nseg = (sal_Size)(arena->m_stats.m_alloc - arena->m_stats.m_free);
            if (nseg > 4 * arena->m_hash_size)
            {
                if (!(arena->m_flags & RTL_ARENA_FLAG_RESCALE))
                {
                    sal_Size ave = nseg >> arena->m_hash_shift;
                    assert(ave != 0);
                    sal_Size new_size = arena->m_hash_size << (highbit(ave) - 1);

                    /* flag guards against re-entering rescale; the lock
                       must be released because rescale acquires it */
                    arena->m_flags |= RTL_ARENA_FLAG_RESCALE;
                    RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
                    rtl_arena_hash_rescale (arena, new_size);
                    RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                    arena->m_flags &= ~RTL_ARENA_FLAG_RESCALE;
                }
            }
        }
    }

    return segment;
}
410 /* ================================================================= */
/** rtl_arena_segment_alloc()
 *  allocate (and remove) segment from freelist
 *
 *  Power-of-two sizes (or sizes rounded up to one) use "instant fit"
 *  via the freelist bitmap; a non-power-of-two size that maps to the
 *  highest freelist falls back to first-fit search of that list.
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 *  @return  true iff a segment was found (stored in *ppSegment).
 */
bool
rtl_arena_segment_alloc (
    rtl_arena_type * arena,
    sal_Size size,
    rtl_arena_segment_type ** ppSegment
)
{
    int index = 0;

    assert(*ppSegment == 0);
    if (!RTL_MEMORY_ISP2(size))
    {
        int msb = highbit(size);
        if (RTL_ARENA_FREELIST_SIZE == sal::static_int_cast< size_t >(msb))
        {
            /* highest possible freelist: fall back to first fit */
            rtl_arena_segment_type *head, *segment;

            head = &(arena->m_freelist_head[msb - 1]);
            for (segment = head->m_fnext; segment != head; segment = segment->m_fnext)
            {
                if (segment->m_size >= size)
                {
                    /* allocate first fit segment */
                    (*ppSegment) = segment;
                    break;
                }
            }
            goto dequeue_and_leave;
        }

        /* roundup to next power of 2 */
        size = (((sal_Size)1) << msb);
    }

    /* bitmap bit >= size selects the smallest non-empty fitting freelist */
    index = lowbit(RTL_MEMORY_P2ALIGN(arena->m_freelist_bitmap, size));
    if (index > 0)
    {
        /* instant fit: allocate first free segment */
        rtl_arena_segment_type *head;

        head = &(arena->m_freelist_head[index - 1]);
        (*ppSegment) = head->m_fnext;
        assert((*ppSegment) != head);
    }

dequeue_and_leave:
    if (*ppSegment != 0)
    {
        /* remove from freelist */
        rtl_arena_freelist_remove (arena, (*ppSegment));
    }
    return (*ppSegment != 0);
}
/** rtl_arena_segment_create()
 *  import new (span) segment from source arena
 *
 *  Obtains two descriptors (the span and the free segment covering it),
 *  releases the arena lock while calling the source allocator, and on
 *  success links span + free segment into the segment list.
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 *  @return  1 on success (free segment in *ppSegment), 0 otherwise.
 */
int
rtl_arena_segment_create (
    rtl_arena_type * arena,
    sal_Size size,
    rtl_arena_segment_type ** ppSegment
)
{
    assert((*ppSegment) == 0);
    if (arena->m_source_alloc != 0)
    {
        rtl_arena_segment_get (arena, ppSegment);
        if (*ppSegment != 0)
        {
            rtl_arena_segment_type * span = 0;
            rtl_arena_segment_get (arena, &span);
            if (span != 0)
            {
                /* import new span from source arena; drop the lock,
                   the source allocator takes its own lock */
                RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                span->m_size = size;
                span->m_addr = reinterpret_cast<sal_uIntPtr>(
                    (arena->m_source_alloc)(
                        arena->m_source_arena, &(span->m_size)));

                RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
                if (span->m_addr != 0)
                {
                    /* insert onto segment list, update stats */
                    span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
                    QUEUE_INSERT_HEAD_NAMED(&(arena->m_segment_head), span, s);
                    arena->m_stats.m_mem_total += span->m_size;

                    /* the free segment initially covers the whole span */
                    (*ppSegment)->m_addr = span->m_addr;
                    (*ppSegment)->m_size = span->m_size;
                    (*ppSegment)->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                    QUEUE_INSERT_HEAD_NAMED(span, (*ppSegment), s);

                    /* report success */
                    return 1;
                }
                rtl_arena_segment_put (arena, &span);
            }
            rtl_arena_segment_put (arena, ppSegment);
        }
    }
    return 0;
}
/** rtl_arena_segment_coalesce()
 *  mark as free and join with adjacent free segment(s)
 *
 *  The segment list is kept in address order, so the list neighbours
 *  are also the address neighbours; merge absorbs their descriptors.
 *
 *  @precond arena->m_lock acquired
 *  @precond segment marked 'used'
 */
void
rtl_arena_segment_coalesce (
    rtl_arena_type * arena,
    rtl_arena_segment_type * segment
)
{
    rtl_arena_segment_type *next, *prev;

    /* mark segment free */
    assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_USED);
    segment->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;

    /* try to merge w/ next segment */
    next = segment->m_snext;
    if (next->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        /* address-adjacency follows from the ordered segment list */
        assert(segment->m_addr + segment->m_size == next->m_addr);
        segment->m_size += next->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, next);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(next, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &next);
    }

    /* try to merge w/ prev segment */
    prev = segment->m_sprev;
    if (prev->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
    {
        assert(prev->m_addr + prev->m_size == segment->m_addr);
        /* absorb prev: segment grows downwards to prev's start */
        segment->m_addr  = prev->m_addr;
        segment->m_size += prev->m_size;

        /* remove from freelist */
        rtl_arena_freelist_remove (arena, prev);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(prev, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &prev);
    }
}
582 /* ================================================================= */
/** rtl_arena_constructor()
 *  Zero-initialize an arena and set up all embedded list heads
 *  (segment lists, freelists) plus the initial embedded hash table.
 *  Each freelist head's m_size carries its size-class bit, used by
 *  the freelist bitmap logic.
 */
void
rtl_arena_constructor (void * obj)
{
    rtl_arena_type * arena = static_cast<rtl_arena_type*>(obj);
    rtl_arena_segment_type * head;
    size_t i;

    memset (arena, 0, sizeof(rtl_arena_type));

    QUEUE_START_NAMED(arena, arena_);

    (void) RTL_MEMORY_LOCK_INIT(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_reserve_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    head = &(arena->m_segment_head);
    rtl_arena_segment_constructor (head);
    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);
        rtl_arena_segment_constructor (head);

        /* m_size doubles as this freelist's bitmap bit */
        head->m_size = (((sal_Size)1) << i);
        head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
    }

    arena->m_hash_table = arena->m_hash_table_0;
    arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
    arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
}
/** rtl_arena_destructor()
 *  Tear down an arena object; debug-asserts that it was fully
 *  deactivated first (unlinked from g_arena_list, hash table back
 *  to the embedded one, all list heads intact).
 */
void
rtl_arena_destructor (void * obj)
{
    rtl_arena_type * arena = static_cast<rtl_arena_type*>(obj);
    rtl_arena_segment_type * head;
    size_t i;

    assert(QUEUE_STARTED_NAMED(arena, arena_));

    RTL_MEMORY_LOCK_DESTROY(&(arena->m_lock));

    head = &(arena->m_segment_reserve_span_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_reserve_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    head = &(arena->m_segment_head);
    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
    rtl_arena_segment_destructor (head);

    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
    {
        head = &(arena->m_freelist_head[i]);

        assert(head->m_size == (((sal_Size)1) << i));
        assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);

        rtl_arena_segment_destructor (head);
    }

    assert(arena->m_hash_table == arena->m_hash_table_0);
    assert(arena->m_hash_size  == RTL_ARENA_HASH_SIZE);
    assert(
        arena->m_hash_shift ==
        sal::static_int_cast< unsigned >(highbit(arena->m_hash_size) - 1));
}
667 /* ================================================================= */
/** rtl_arena_activate()
 *  Configure a constructed arena (name, quantum, optional quantum
 *  caches, source arena callbacks) and register it in g_arena_list.
 *
 *  @return arena on success, 0 when the quantum-cache table could not
 *          be allocated (caller deactivates/destroys the arena).
 */
rtl_arena_type *
rtl_arena_activate (
    rtl_arena_type * arena,
    const char * name,
    sal_Size quantum,
    sal_Size quantum_cache_max,
    rtl_arena_type * source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size)
)
{
    assert(arena != 0);
    if (arena != 0)
    {
        (void) snprintf (arena->m_name, sizeof(arena->m_name), "%s", name);

        if (!RTL_MEMORY_ISP2(quantum))
        {
            /* roundup to next power of 2 */
            quantum = (((sal_Size)1) << highbit(quantum));
        }
        quantum_cache_max = RTL_MEMORY_P2ROUNDUP(quantum_cache_max, quantum);

        arena->m_quantum = quantum;
        arena->m_quantum_shift = highbit(arena->m_quantum) - 1;
        arena->m_qcache_max = quantum_cache_max;

        arena->m_source_arena = source_arena;
        arena->m_source_alloc = source_alloc;
        arena->m_source_free  = source_free;

        if (arena->m_qcache_max > 0)
        {
            /* one rtl_cache per quantum multiple up to qcache_max */
            char namebuf[RTL_ARENA_NAME_LENGTH + 1];
            int i, n = (arena->m_qcache_max >> arena->m_quantum_shift);

            sal_Size size = n * sizeof(rtl_cache_type*);
            arena->m_qcache_ptr = static_cast<rtl_cache_type**>(rtl_arena_alloc (gp_arena_arena, &size));
            if (!(arena->m_qcache_ptr))
            {
                /* out of memory */
                return 0;
            }
            for (i = 1; i <= n; i++)
            {
                size = i * arena->m_quantum;
                (void) snprintf (namebuf, sizeof(namebuf), "%s_%" SAL_PRIuUINTPTR, arena->m_name, size);
                arena->m_qcache_ptr[i - 1] = rtl_cache_create(namebuf, size, 0, NULL, NULL, NULL, NULL, arena, RTL_CACHE_FLAG_QUANTUMCACHE);
            }
        }

        /* insert into arena list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_arena_list.m_arena_head), arena, arena_);
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    return arena;
}
/** rtl_arena_deactivate()
 *  Reverse rtl_arena_activate(): unregister the arena, destroy its
 *  quantum caches, reclaim leaked segments, restore the embedded hash
 *  table, and release all segment descriptors (returning descriptor
 *  spans to the machdep arena). No lock on arena->m_lock is taken;
 *  callers must guarantee exclusive access at teardown.
 */
void
rtl_arena_deactivate (
    rtl_arena_type * arena
)
{
    rtl_arena_segment_type * head, * segment;

    /* remove from arena list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
    QUEUE_REMOVE_NAMED(arena, arena_);
    RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));

    /* cleanup quantum cache(s) */
    if ((arena->m_qcache_max > 0) && (arena->m_qcache_ptr != 0))
    {
        int i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
        for (i = 1; i <= n; i++)
        {
            if (arena->m_qcache_ptr[i - 1] != 0)
            {
                rtl_cache_destroy (arena->m_qcache_ptr[i - 1]);
                arena->m_qcache_ptr[i - 1] = 0;
            }
        }
        rtl_arena_free (
            gp_arena_arena,
            arena->m_qcache_ptr,
            n * sizeof(rtl_cache_type*));

        arena->m_qcache_ptr = 0;
    }

    /* check for leaked segments */
    // SAL_INFO(
    //  "sal.rtl",
    //  "rtl_arena_deactivate(" << arena->m_name << "): allocs: "
    //      << arena->m_stats.m_alloc << ", frees: " << arena->m_stats.m_free
    //      << "; total: " << arena->m_stats.m_mem_total << ", used: "
    //      << arena->m_stats.m_mem_alloc);
    if (arena->m_stats.m_alloc > arena->m_stats.m_free)
    {
        sal_Size i, n;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_arena_deactivate(" << arena->m_name << "): cleaning up "
        //      << (arena->m_stats.m_alloc - arena->m_stats.m_free)
        //      << " leaked segment(s) [" << arena->m_stats.m_mem_alloc
        //      << " bytes]");

        /* cleanup still used segment(s): drain every hash bucket and
           return the segments to the freelists */
        for (i = 0, n = arena->m_hash_size; i < n; i++)
        {
            while ((segment = arena->m_hash_table[i]) != 0)
            {
                /* pop from hash table */
                arena->m_hash_table[i] = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }
        }
    }

    /* cleanup hash table: restore the embedded initial table */
    if (arena->m_hash_table != arena->m_hash_table_0)
    {
        rtl_arena_free (
            gp_arena_arena,
            arena->m_hash_table,
            arena->m_hash_size * sizeof(rtl_arena_segment_type*));

        arena->m_hash_table = arena->m_hash_table_0;
        arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
    }

    /* cleanup segment list */
    head = &(arena->m_segment_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        if (segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
        {
            /* remove from freelist */
            rtl_arena_freelist_remove (arena, segment);
        }
        else
        {
            /* can have only free and span segments here */
            assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
        }

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* release segment descriptor */
        rtl_arena_segment_put (arena, &segment);
    }

    /* cleanup segment reserve list */
    head = &(arena->m_segment_reserve_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);
    }

    /* cleanup segment reserve span(s); must run last, the spans back
       every descriptor handled above */
    head = &(arena->m_segment_reserve_span_head);
    for (segment = head->m_snext; segment != head; segment = head->m_snext)
    {
        /* can have only span segments here */
        assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);

        /* remove from segment list */
        QUEUE_REMOVE_NAMED(segment, s);

        /* return span to g_machdep_arena */
        rtl_machdep_free (gp_machdep_arena, reinterpret_cast<void*>(segment->m_addr), segment->m_size);
    }
}
857 } //namespace
858 /* ================================================================= *
860 * arena implementation.
862 * ================================================================= */
/** rtl_arena_create()
 *  Public constructor: allocate, construct and activate an arena.
 *  During bootstrap gp_arena_arena may still be 0; in that case the
 *  singleton is created and the allocation is retried once (goto).
 *
 *  @return the new arena, or 0 on failure.
 */
rtl_arena_type *
SAL_CALL rtl_arena_create (
    const char * name,
    sal_Size quantum,
    sal_Size quantum_cache_max,
    rtl_arena_type * source_arena,
    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
    void (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size),
    SAL_UNUSED_PARAMETER int
) SAL_THROW_EXTERN_C()
{
    rtl_arena_type * result = 0;
    sal_Size size = sizeof(rtl_arena_type);

try_alloc:
    result = static_cast<rtl_arena_type*>(rtl_arena_alloc (gp_arena_arena, &size));
    if (result != 0)
    {
        rtl_arena_type * arena = result;
        rtl_arena_constructor (arena);

        if (!source_arena)
        {
            /* default to the page-backed default arena */
            assert(gp_default_arena != 0);
            source_arena = gp_default_arena;
        }

        result = rtl_arena_activate (
            arena,
            name,
            quantum,
            quantum_cache_max,
            source_arena,
            source_alloc,
            source_free
        );

        if (result == 0)
        {
            /* activation failed: roll back construction + allocation */
            rtl_arena_deactivate (arena);
            rtl_arena_destructor (arena);
            rtl_arena_free (gp_arena_arena, arena, size);
        }
    }
    else if (gp_arena_arena == 0)
    {
        /* bootstrap: create the internal arena, then retry once */
        ensureArenaSingleton();
        if (gp_arena_arena)
        {
            /* try again */
            goto try_alloc;
        }
    }
    return result;
}
922 /** rtl_arena_destroy()
924 void
925 SAL_CALL rtl_arena_destroy (
926 rtl_arena_type * arena
927 ) SAL_THROW_EXTERN_C()
929 if (arena != 0)
931 rtl_arena_deactivate (arena);
932 rtl_arena_destructor (arena);
933 rtl_arena_free (gp_arena_arena, arena, sizeof(rtl_arena_type));
/** rtl_arena_alloc()
 *  Allocate at least *pSize bytes (rounded to the quantum) from the
 *  arena. Small requests (<= qcache_max) go to the quantum caches;
 *  larger ones come from the segment freelists, importing a new span
 *  from the source arena if needed. On success *pSize is updated to
 *  the actual segment size.
 *
 *  @return allocated address, or 0 on failure / bad arguments.
 */
void *
SAL_CALL rtl_arena_alloc (
    rtl_arena_type * arena,
    sal_Size * pSize
) SAL_THROW_EXTERN_C()
{
    void * addr = 0;

    if ((arena != 0) && (pSize != 0))
    {
        sal_Size size;

        size = RTL_MEMORY_ALIGN((*pSize), arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* allocate from segment list */
            rtl_arena_segment_type *segment = 0;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
            if (rtl_arena_segment_alloc (arena, size, &segment) ||
                rtl_arena_segment_create(arena, size, &segment)    )
            {
                /* shrink to fit */
                sal_Size oversize;

                /* mark segment used */
                assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE);
                segment->m_type = RTL_ARENA_SEGMENT_TYPE_USED;

                /* resize: split off the surplus only when it is worth
                   keeping as a free segment */
                assert(segment->m_size >= size);
                oversize = segment->m_size - size;
                if ((oversize >= arena->m_quantum) && (oversize >= arena->m_qcache_max))
                {
                    rtl_arena_segment_type * remainder = 0;
                    rtl_arena_segment_get (arena, &remainder);
                    if (remainder != 0)
                    {
                        segment->m_size = size;

                        remainder->m_addr = segment->m_addr + segment->m_size;
                        remainder->m_size = oversize;
                        remainder->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
                        QUEUE_INSERT_HEAD_NAMED(segment, remainder, s);

                        rtl_arena_freelist_insert (arena, remainder);
                    }
                }

                rtl_arena_hash_insert (arena, segment);

                (*pSize) = segment->m_size;
                addr = reinterpret_cast<void*>(segment->m_addr);
            }
            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* allocate from quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            assert(arena->m_qcache_ptr[index] != 0);

            addr = rtl_cache_alloc (arena->m_qcache_ptr[index]);
            if (addr != 0)
                (*pSize) = size;
        }
    }
    return addr;
}
/** rtl_arena_free()
 *  Release an allocation made by rtl_arena_alloc(). Small sizes go
 *  back to the quantum cache; larger ones are removed from the hash
 *  table, coalesced with neighbours, and — when an imported span
 *  becomes entirely free — the whole span is returned to the source
 *  arena (with the lock dropped around the callback).
 */
void
SAL_CALL rtl_arena_free (
    rtl_arena_type * arena,
    void * addr,
    sal_Size size
) SAL_THROW_EXTERN_C()
{
    if (arena != 0)
    {
        size = RTL_MEMORY_ALIGN(size, arena->m_quantum);
        if (size > arena->m_qcache_max)
        {
            /* free to segment list */
            rtl_arena_segment_type * segment;

            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

            segment = rtl_arena_hash_remove (arena, reinterpret_cast<sal_uIntPtr>(addr), size);
            if (segment != 0)
            {
                rtl_arena_segment_type *next, *prev;

                /* coalesce w/ adjacent free segment(s) */
                rtl_arena_segment_coalesce (arena, segment);

                /* determine (new) next and prev segment */
                next = segment->m_snext, prev = segment->m_sprev;

                /* entire span free when prev is a span, and next is either a span or a list head */
                if (((prev->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)) &&
                    ((next->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)  ||
                     (next->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD))    )
                {
                    /* the free segment must exactly cover its span */
                    assert(
                        prev->m_addr == segment->m_addr
                        && prev->m_size == segment->m_size);

                    if (arena->m_source_free)
                    {
                        addr = reinterpret_cast<void*>(prev->m_addr);
                        size = prev->m_size;

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(segment, s);

                        /* release segment descriptor */
                        rtl_arena_segment_put (arena, &segment);

                        /* remove from segment list */
                        QUEUE_REMOVE_NAMED(prev, s);

                        /* release (span) segment descriptor */
                        rtl_arena_segment_put (arena, &prev);

                        /* update stats, return span to source arena;
                           lock released before the source callback */
                        arena->m_stats.m_mem_total -= size;
                        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

                        (arena->m_source_free)(arena->m_source_arena, addr, size);
                        return;
                    }
                }

                /* insert onto freelist */
                rtl_arena_freelist_insert (arena, segment);
            }

            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
        }
        else if (size > 0)
        {
            /* free to quantum cache(s) */
            int index = (size >> arena->m_quantum_shift) - 1;
            assert(arena->m_qcache_ptr[index] != 0);

            rtl_cache_free (arena->m_qcache_ptr[index], addr);
        }
    }
}
1091 /* ================================================================= *
1093 * machdep internals.
1095 * ================================================================= */
1097 #if defined(SAL_UNX)
1098 #include <sys/mman.h>
1099 #elif defined(SAL_W32)
1100 #define MAP_FAILED 0
1101 #endif /* SAL_UNX || SAL_W32 */
1103 namespace
/** rtl_machdep_alloc()
 *  Allocate raw pages from the OS (mmap on Unix, VirtualAlloc on
 *  Windows) for the machdep pseudo arena, rounding the request up to
 *  a platform-appropriate granularity. On success updates the arena
 *  stats and stores the actual size in *pSize.
 *
 *  @return mapped address, or NULL on failure.
 */
void *
SAL_CALL rtl_machdep_alloc (
    rtl_arena_type * pArena,
    sal_Size * pSize
)
{
    void * addr;
    sal_Size size = (*pSize);

    assert(pArena == gp_machdep_arena);

#if defined(SOLARIS) && defined(SPARC)
    /* see @ mmap(2) man pages */
    size += (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
    if (size > (4 << 20))
        size = RTL_MEMORY_P2ROUNDUP(size, (4 << 20));
    else if (size > (512 << 10))
        size = RTL_MEMORY_P2ROUNDUP(size, (512 << 10));
    else
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    size -= (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
#else
    /* default allocation granularity: at least 64KB chunks */
    if(pArena->m_quantum < (64 << 10))
    {
        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
    }
    else
    {
        size = RTL_MEMORY_P2ROUNDUP(size, pArena->m_quantum);
    }
#endif

#if defined(SAL_UNX)
    addr = mmap (NULL, (size_t)(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#elif defined(SAL_W32)
    /* MAP_FAILED is #defined to 0 for SAL_W32, so the check below
       covers the VirtualAlloc NULL-on-failure convention too */
    addr = VirtualAlloc (NULL, (SIZE_T)(size), MEM_COMMIT, PAGE_READWRITE);
#endif /* (SAL_UNX || SAL_W32) */

    if (addr != MAP_FAILED)
    {
        pArena->m_stats.m_alloc += 1;
        pArena->m_stats.m_mem_total += size;
        pArena->m_stats.m_mem_alloc += size;

        (*pSize) = size;
        return addr;
    }
    return NULL;
}
/** rtl_machdep_free()
 *  Return pages obtained via rtl_machdep_alloc() to the OS and
 *  update the machdep arena's statistics.
 */
void
SAL_CALL rtl_machdep_free (
    rtl_arena_type * pArena,
    void * pAddr,
    sal_Size nSize
)
{
    assert(pArena == gp_machdep_arena);

    pArena->m_stats.m_free += 1;
    pArena->m_stats.m_mem_total -= nSize;
    pArena->m_stats.m_mem_alloc -= nSize;

#if defined(SAL_UNX)
    (void) munmap(pAddr, nSize);
#elif defined(SAL_W32)
    /* MEM_RELEASE requires size 0 and frees the whole reservation */
    (void) VirtualFree ((LPVOID)(pAddr), (SIZE_T)(0), MEM_RELEASE);
#endif /* (SAL_UNX || SAL_W32) */
}
/** rtl_machdep_pagesize()
 *  Query the OS page size (used as the machdep arena's quantum and
 *  for sizing descriptor spans).
 */
sal_Size
rtl_machdep_pagesize()
{
#if defined(SAL_UNX)
#if defined(FREEBSD) || defined(NETBSD) || defined(DRAGONFLY)
    return (sal_Size)getpagesize();
#else  /* POSIX */
    return (sal_Size)sysconf(_SC_PAGESIZE);
#endif /* xBSD || POSIX */
#elif defined(SAL_W32)
    SYSTEM_INFO info;
    GetSystemInfo (&info);
    return (sal_Size)info.dwPageSize;
#endif /* (SAL_UNX || SAL_W32) */
}
1197 } //namespace
1199 /* ================================================================= *
1201 * arena initialization.
1203 * ================================================================= */
/** rtl_arena_init()
 *  Bootstrap the arena subsystem in strict dependency order:
 *  arena list -> machdep pseudo arena (OS pages) -> default arena
 *  (sourced from machdep) -> internal arena (sourced from default,
 *  used for arena/hash-table bookkeeping allocations).
 */
void
rtl_arena_init()
{
    {
        /* list of arenas */
        RTL_MEMORY_LOCK_INIT(&(g_arena_list.m_lock));
        rtl_arena_constructor (&(g_arena_list.m_arena_head));
    }
    {
        /* machdep (pseudo) arena; function-local static so it needs
           no dynamic allocation during bootstrap */
        static rtl_arena_type g_machdep_arena;

        assert(gp_machdep_arena == 0);
        rtl_arena_constructor (&g_machdep_arena);

        gp_machdep_arena = rtl_arena_activate (
            &g_machdep_arena,
            "rtl_machdep_arena",
            rtl_machdep_pagesize(),
            0,       /* no quantum caching */
            0, 0, 0  /* no source */
        );
        assert(gp_machdep_arena != 0);
    }
    {
        /* default arena */
        static rtl_arena_type g_default_arena;

        assert(gp_default_arena == 0);
        rtl_arena_constructor (&g_default_arena);

        gp_default_arena = rtl_arena_activate (
            &g_default_arena,
            "rtl_default_arena",
            rtl_machdep_pagesize(),
            0,                 /* no quantum caching */
            gp_machdep_arena,  /* source */
            rtl_machdep_alloc,
            rtl_machdep_free
        );
        assert(gp_default_arena != 0);
    }
    {
        /* arena internal arena */
        static rtl_arena_type g_arena_arena;

        assert(gp_arena_arena == 0);
        rtl_arena_constructor (&g_arena_arena);

        gp_arena_arena = rtl_arena_activate (
            &g_arena_arena,
            "rtl_arena_internal_arena",
            64,                /* quantum */
            0,                 /* no quantum caching */
            gp_default_arena,  /* source */
            rtl_arena_alloc,
            rtl_arena_free
        );
        assert(gp_arena_arena != 0);
    }
    // SAL_INFO("sal.rtl", "rtl_arena_init completed");
}
1268 /* ================================================================= */
/** rtl_arena_fini()
 *  Shutdown hook: walks the remaining registered arenas. The walk only
 *  feeds the (currently commented-out) diagnostics; arenas are not
 *  destroyed here.
 */
void
rtl_arena_fini()
{
    if (gp_arena_arena != 0)
    {
        rtl_arena_type * arena, * head;

        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
        head = &(g_arena_list.m_arena_head);

        for (arena = head->m_arena_next; arena != head; arena = arena->m_arena_next)
        {
            // SAL_INFO(
            //  "sal.rtl",
            //  "rtl_arena_fini(" << arena->m_name << "): allocs: "
            //      << arena->m_stats.m_alloc << ", frees: "
            //      << arena->m_stats.m_free << "; total: "
            //      << arena->m_stats.m_mem_total << ", used: "
            //      << arena->m_stats.m_mem_alloc);
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_arena_fini completed");
}
1295 /* ================================================================= */
1297 /* vim:set shiftwidth=4 softtabstop=4 expandtab: */