1 /*************************************************************************
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 * Copyright 2000, 2010 Oracle and/or its affiliates.
7 * OpenOffice.org - a multi-platform office productivity suite
9 * This file is part of OpenOffice.org.
11 * OpenOffice.org is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU Lesser General Public License version 3
13 * only, as published by the Free Software Foundation.
15 * OpenOffice.org is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU Lesser General Public License version 3 for more details
19 * (a copy is included in the LICENSE file that accompanied this code).
21 * You should have received a copy of the GNU Lesser General Public License
22 * version 3 along with OpenOffice.org. If not, see
23 * <http://www.openoffice.org/license.html>
24 * for a copy of the LGPLv3 License.
26 ************************************************************************/
28 #include "alloc_cache.h"
29 #include "alloc_impl.h"
30 #include "alloc_arena.h"
31 #include "internal/once.h"
32 #include "sal/macros.h"
33 #include "osl/diagnose.h"
35 #ifndef INCLUDED_STRING_H
39 #ifndef INCLUDED_STDIO_H
45 #define OSL_TRACE 1 ? ((void)0) : _OSL_GLOBAL osl_trace
48 /* ================================================================= *
52 * ================================================================= */
57 struct rtl_cache_list_st
/* Global registry of all active caches plus the state of the periodic
 * working-set-update (wsupdate) thread.
 * NOTE(review): extraction artifact — the embedded original numbering
 * skips lines here (opening brace, some members, closing brace), so
 * this declaration is incomplete as shown.
 */
59 rtl_memory_lock_type m_lock
;
/* list head; caches link themselves in via their cache_ queue linkage */
60 rtl_cache_type m_cache_head
;
62 #if defined(SAL_UNX) || defined(SAL_OS2)
63 pthread_t m_update_thread
;
/* signalled (see rtl_cache_wsupdate_fini) to wake/stop the wsupdate thread */
64 pthread_cond_t m_update_cond
;
65 #elif defined(SAL_W32)
66 HANDLE m_update_thread
;
68 #endif /* SAL_UNX || SAL_W32 */
/* the single process-wide instance of the registry */
72 static struct rtl_cache_list_st g_cache_list
;
76 * provided for cache_type allocations, and hash_table resizing.
80 static rtl_arena_type
* gp_cache_arena
= 0;
83 /** gp_cache_magazine_cache
86 static rtl_cache_type
* gp_cache_magazine_cache
= 0;
89 /** gp_cache_slab_cache
92 static rtl_cache_type
* gp_cache_slab_cache
= 0;
95 /** gp_cache_bufctl_cache
98 static rtl_cache_type
* gp_cache_bufctl_cache
= 0;
105 rtl_cache_init (void);
108 /* ================================================================= */
110 /** RTL_CACHE_HASH_INDEX()
 * Map a buffer address to a bucket index of cache->m_hash_table:
 * fold shifted copies of the address together, drop the low
 * type-granularity bits (q = m_type_shift), and mask with
 * (m_hash_size - 1).
 */
112 #define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
113 ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))
115 #define RTL_CACHE_HASH_INDEX(cache, addr) \
116 RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
119 /** rtl_cache_hash_rescale()
122 rtl_cache_hash_rescale (
123 rtl_cache_type
* cache
,
127 rtl_cache_bufctl_type
** new_table
;
130 new_bytes
= new_size
* sizeof(rtl_cache_bufctl_type
*);
131 new_table
= (rtl_cache_bufctl_type
**)rtl_arena_alloc(gp_cache_arena
, &new_bytes
);
135 rtl_cache_bufctl_type
** old_table
;
136 sal_Size old_size
, i
;
138 memset (new_table
, 0, new_bytes
);
140 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
142 old_table
= cache
->m_hash_table
;
143 old_size
= cache
->m_hash_size
;
146 "rtl_cache_hash_rescale(\"%s\"): "
147 "nbuf: % " PRIu64
" (ave: %" PRIu64
"), frees: %" PRIu64
" "
148 "[old_size: %lu, new_size: %lu]",
150 cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
,
151 (cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
) >> cache
->m_hash_shift
,
152 cache
->m_slab_stats
.m_free
,
155 cache
->m_hash_table
= new_table
;
156 cache
->m_hash_size
= new_size
;
157 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
159 for (i
= 0; i
< old_size
; i
++)
161 rtl_cache_bufctl_type
* curr
= old_table
[i
];
164 rtl_cache_bufctl_type
* next
= curr
->m_next
;
165 rtl_cache_bufctl_type
** head
;
167 head
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, curr
->m_addr
)]);
168 curr
->m_next
= (*head
);
176 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
178 if (old_table
!= cache
->m_hash_table_0
)
180 sal_Size old_bytes
= old_size
* sizeof(rtl_cache_bufctl_type
*);
181 rtl_arena_free (gp_cache_arena
, old_table
, old_bytes
);
186 /** rtl_cache_hash_insert()
 * Link 'bufctl' into the head of its hash bucket and return the
 * buffer address it represents.
 * NOTE(review): extraction artifact — the embedded original numbering
 * skips lines here; in particular the statement that stores 'bufctl'
 * into *ppHead (original line ~199) is not visible in this view.
 */
188 static RTL_MEMORY_INLINE sal_uIntPtr
189 rtl_cache_hash_insert (
190 rtl_cache_type
* cache
,
191 rtl_cache_bufctl_type
* bufctl
/* bucket head slot for bufctl->m_addr */
194 rtl_cache_bufctl_type
** ppHead
;
196 ppHead
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, bufctl
->m_addr
)]);
/* push onto the singly linked bucket list */
198 bufctl
->m_next
= (*ppHead
);
201 return (bufctl
->m_addr
);
204 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
205 #pragma inline(rtl_cache_hash_insert)
206 #endif /* __SUNPRO_C */
209 /** rtl_cache_hash_remove()
211 static rtl_cache_bufctl_type
*
212 rtl_cache_hash_remove (
213 rtl_cache_type
* cache
,
217 rtl_cache_bufctl_type
** ppHead
;
218 rtl_cache_bufctl_type
* bufctl
;
219 sal_Size lookups
= 0;
221 ppHead
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, addr
)]);
222 while ((bufctl
= *ppHead
) != 0)
224 if (bufctl
->m_addr
== addr
)
226 *ppHead
= bufctl
->m_next
, bufctl
->m_next
= 0;
231 ppHead
= &(bufctl
->m_next
);
234 OSL_ASSERT (bufctl
!= 0); /* bad free */
238 sal_Size nbuf
= (sal_Size
)(cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
);
239 if (nbuf
> 4 * cache
->m_hash_size
)
241 if (!(cache
->m_features
& RTL_CACHE_FEATURE_RESCALE
))
243 sal_Size ave
= nbuf
>> cache
->m_hash_shift
;
244 sal_Size new_size
= cache
->m_hash_size
<< (highbit(ave
) - 1);
246 cache
->m_features
|= RTL_CACHE_FEATURE_RESCALE
;
247 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
248 rtl_cache_hash_rescale (cache
, new_size
);
249 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
250 cache
->m_features
&= ~RTL_CACHE_FEATURE_RESCALE
;
258 /* ================================================================= */
/** RTL_CACHE_SLAB()
 * Locate the slab header embedded at the end of a slab's memory:
 * advance 'addr' to the end of its 'size' region (presumably what
 * RTL_MEMORY_P2END computes — TODO confirm) and step back one
 * rtl_cache_slab_type.
 */
262 #define RTL_CACHE_SLAB(addr, size) \
263 (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
266 /** rtl_cache_slab_constructor()
 * Initialize a slab struct: reset its queue linkage.
 * NOTE(review): extraction artifact — the return statement and the
 * function's braces are missing from this view (embedded numbering
 * gaps).
 */
269 rtl_cache_slab_constructor (void * obj
, void * arg
)
271 rtl_cache_slab_type
* slab
= (rtl_cache_slab_type
*)(obj
);
273 (void) arg
; /* unused */
/* mark as not linked into any slab queue */
275 QUEUE_START_NAMED(slab
, slab_
);
282 /** rtl_cache_slab_destructor()
 * Debug-only sanity checks before a slab struct is released:
 * it must be unlinked from all queues and hold no live objects.
 */
285 rtl_cache_slab_destructor (void * obj
, void * arg
)
287 #if OSL_DEBUG_LEVEL == 0
288 (void) obj
; /* unused */
289 #else /* OSL_DEBUG_LEVEL */
290 rtl_cache_slab_type
* slab
= (rtl_cache_slab_type
*)(obj
);
292 /* assure removed from queue(s) */
293 OSL_ASSERT(QUEUE_STARTED_NAMED(slab
, slab_
));
295 /* assure no longer referenced */
296 OSL_ASSERT(slab
->m_ntypes
== 0);
297 #endif /* OSL_DEBUG_LEVEL */
299 (void) arg
; /* unused */
303 /** rtl_cache_slab_create()
305 * @precond cache->m_slab_lock released.
307 static rtl_cache_slab_type
*
308 rtl_cache_slab_create (
309 rtl_cache_type
* cache
312 rtl_cache_slab_type
* slab
= 0;
316 size
= cache
->m_slab_size
;
317 addr
= rtl_arena_alloc (cache
->m_source
, &size
);
320 OSL_ASSERT(size
>= cache
->m_slab_size
);
322 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
324 /* allocate slab struct from slab cache */
325 OSL_ASSERT (cache
!= gp_cache_slab_cache
);
326 slab
= (rtl_cache_slab_type
*)rtl_cache_alloc (gp_cache_slab_cache
);
330 /* construct embedded slab struct */
331 slab
= RTL_CACHE_SLAB(addr
, cache
->m_slab_size
);
332 (void) rtl_cache_slab_constructor (slab
, 0);
336 slab
->m_data
= (sal_uIntPtr
)(addr
);
338 /* dynamic freelist initialization */
339 slab
->m_bp
= slab
->m_data
;
344 rtl_arena_free (cache
->m_source
, addr
, size
);
351 /** rtl_cache_slab_destroy()
353 * @precond cache->m_slab_lock released.
356 rtl_cache_slab_destroy (
357 rtl_cache_type
* cache
,
358 rtl_cache_slab_type
* slab
361 void * addr
= (void*)(slab
->m_data
);
362 sal_Size refcnt
= slab
->m_ntypes
; slab
->m_ntypes
= 0;
364 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
366 /* cleanup bufctl(s) for free buffer(s) */
367 sal_Size ntypes
= (slab
->m_bp
- slab
->m_data
) / cache
->m_type_size
;
368 for (ntypes
-= refcnt
; slab
->m_sp
!= 0; ntypes
--)
370 rtl_cache_bufctl_type
* bufctl
= slab
->m_sp
;
372 /* pop from freelist */
373 slab
->m_sp
= bufctl
->m_next
, bufctl
->m_next
= 0;
375 /* return bufctl struct to bufctl cache */
376 rtl_cache_free (gp_cache_bufctl_cache
, bufctl
);
378 OSL_ASSERT(ntypes
== 0);
380 /* return slab struct to slab cache */
381 rtl_cache_free (gp_cache_slab_cache
, slab
);
385 /* destruct embedded slab struct */
386 rtl_cache_slab_destructor (slab
, 0);
389 if ((refcnt
== 0) || (cache
->m_features
& RTL_CACHE_FEATURE_BULKDESTROY
))
392 rtl_arena_free (cache
->m_source
, addr
, cache
->m_slab_size
);
397 /** rtl_cache_slab_populate()
399 * @precond cache->m_slab_lock acquired.
 * Grow the slab layer by one slab: drop the slab lock around
 * rtl_cache_slab_create(), then apply cache coloring to the new
 * slab's buffer pointer, account the added memory, and put the
 * slab on the 'free' queue.
 * NOTE(review): extraction artifact — the null check on the created
 * slab, the body of the ncolor wrap-around branch, and the return
 * statement are missing from this view (embedded numbering gaps).
 */
402 rtl_cache_slab_populate (
403 rtl_cache_type
* cache
406 rtl_cache_slab_type
* slab
;
408 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
409 slab
= rtl_cache_slab_create (cache
);
410 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
413 /* update buffer start addr w/ current color */
414 slab
->m_bp
+= cache
->m_ncolor
;
416 /* update color for next slab */
417 cache
->m_ncolor
+= cache
->m_type_align
;
418 if (cache
->m_ncolor
> cache
->m_ncolor_max
)
422 cache
->m_slab_stats
.m_mem_total
+= cache
->m_slab_size
;
424 /* insert onto 'free' queue */
425 QUEUE_INSERT_HEAD_NAMED(&(cache
->m_free_head
), slab
, slab_
);
430 /* ================================================================= */
432 /** rtl_cache_slab_alloc()
434 * Allocate a buffer from slab layer; used by magazine layer.
437 rtl_cache_slab_alloc (
438 rtl_cache_type
* cache
442 rtl_cache_slab_type
* head
;
444 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
446 head
= &(cache
->m_free_head
);
447 if ((head
->m_slab_next
!= head
) || rtl_cache_slab_populate (cache
))
449 rtl_cache_slab_type
* slab
;
450 rtl_cache_bufctl_type
* bufctl
;
452 slab
= head
->m_slab_next
;
453 OSL_ASSERT(slab
->m_ntypes
< cache
->m_ntypes
);
457 /* initialize bufctl w/ current 'slab->m_bp' */
458 OSL_ASSERT (slab
->m_bp
< slab
->m_data
+ cache
->m_ntypes
* cache
->m_type_size
+ cache
->m_ncolor_max
);
459 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
461 /* allocate bufctl */
462 OSL_ASSERT (cache
!= gp_cache_bufctl_cache
);
463 bufctl
= (rtl_cache_bufctl_type
*)rtl_cache_alloc (gp_cache_bufctl_cache
);
467 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
471 bufctl
->m_addr
= slab
->m_bp
;
472 bufctl
->m_slab
= (sal_uIntPtr
)(slab
);
476 /* embedded bufctl */
477 bufctl
= (rtl_cache_bufctl_type
*)(slab
->m_bp
);
481 /* update 'slab->m_bp' to next free buffer */
482 slab
->m_bp
+= cache
->m_type_size
;
484 /* assign bufctl to freelist */
490 slab
->m_sp
= bufctl
->m_next
;
492 /* increment usage, check for full slab */
493 if ((slab
->m_ntypes
+= 1) == cache
->m_ntypes
)
495 /* remove from 'free' queue */
496 QUEUE_REMOVE_NAMED(slab
, slab_
);
498 /* insert onto 'used' queue (tail) */
499 QUEUE_INSERT_TAIL_NAMED(&(cache
->m_used_head
), slab
, slab_
);
503 cache
->m_slab_stats
.m_alloc
+= 1;
504 cache
->m_slab_stats
.m_mem_alloc
+= cache
->m_type_size
;
506 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
507 addr
= (void*)rtl_cache_hash_insert (cache
, bufctl
);
511 /* DEBUG ONLY: mark allocated, undefined */
512 OSL_DEBUG_ONLY(memset(addr
, 0x77777777, cache
->m_type_size
));
513 VALGRIND_MEMPOOL_ALLOC(cache
, addr
, cache
->m_type_size
);
516 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
521 /** rtl_cache_slab_free()
523 * Return a buffer to slab layer; used by magazine layer.
526 rtl_cache_slab_free (
527 rtl_cache_type
* cache
,
531 rtl_cache_bufctl_type
* bufctl
;
532 rtl_cache_slab_type
* slab
;
534 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
536 /* DEBUG ONLY: mark unallocated, undefined */
537 VALGRIND_MEMPOOL_FREE(cache
, addr
);
538 /* OSL_DEBUG_ONLY() */ VALGRIND_MAKE_MEM_UNDEFINED(addr
, cache
->m_type_size
);
539 OSL_DEBUG_ONLY(memset(addr
, 0x33333333, cache
->m_type_size
));
541 /* determine slab from addr */
542 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
544 bufctl
= rtl_cache_hash_remove (cache
, (sal_uIntPtr
)(addr
));
545 slab
= (bufctl
!= 0) ? (rtl_cache_slab_type
*)(bufctl
->m_slab
) : 0;
549 /* embedded slab struct */
550 bufctl
= (rtl_cache_bufctl_type
*)(addr
);
551 slab
= RTL_CACHE_SLAB(addr
, cache
->m_slab_size
);
556 /* check for full slab */
557 if (slab
->m_ntypes
== cache
->m_ntypes
)
559 /* remove from 'used' queue */
560 QUEUE_REMOVE_NAMED(slab
, slab_
);
562 /* insert onto 'free' queue (head) */
563 QUEUE_INSERT_HEAD_NAMED(&(cache
->m_free_head
), slab
, slab_
);
567 bufctl
->m_next
= slab
->m_sp
;
571 cache
->m_slab_stats
.m_free
+= 1;
572 cache
->m_slab_stats
.m_mem_alloc
-= cache
->m_type_size
;
574 /* decrement usage, check for empty slab */
575 if ((slab
->m_ntypes
-= 1) == 0)
577 /* remove from 'free' queue */
578 QUEUE_REMOVE_NAMED(slab
, slab_
);
581 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
583 /* free 'empty' slab */
584 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
585 rtl_cache_slab_destroy (cache
, slab
);
590 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
593 /* ================================================================= */
595 /** rtl_cache_magazine_constructor()
 * Initialize a magazine struct: set its default capacity.
 * NOTE(review): extraction artifact — linkage/used-count
 * initialization and the return statement are missing from this
 * view (embedded numbering gaps).
 */
598 rtl_cache_magazine_constructor (void * obj
, void * arg
)
600 rtl_cache_magazine_type
* mag
= (rtl_cache_magazine_type
*)(obj
);
601 /* @@@ sal_Size size = (sal_Size)(arg); @@@ */
603 (void) arg
; /* unused */
/* capacity of the m_objects array */
606 mag
->m_mag_size
= RTL_CACHE_MAGAZINE_SIZE
;
613 /** rtl_cache_magazine_destructor()
 * Debug-only sanity checks before a magazine struct is released:
 * it must be unlinked from the depot and hold no cached objects.
 */
616 rtl_cache_magazine_destructor (void * obj
, void * arg
)
618 #if OSL_DEBUG_LEVEL == 0
619 (void) obj
; /* unused */
620 #else /* OSL_DEBUG_LEVEL */
621 rtl_cache_magazine_type
* mag
= (rtl_cache_magazine_type
*)(obj
);
623 /* assure removed from queue(s) */
624 OSL_ASSERT(mag
->m_mag_next
== 0);
626 /* assure no longer referenced */
627 OSL_ASSERT(mag
->m_mag_used
== 0);
628 #endif /* OSL_DEBUG_LEVEL */
630 (void) arg
; /* unused */
634 /** rtl_cache_magazine_clear()
 * Pop every cached object out of 'mag', run the cache's destructor
 * on it (if one is registered) and hand the raw buffer back to the
 * slab layer.
 */
637 rtl_cache_magazine_clear (
638 rtl_cache_type
* cache
,
639 rtl_cache_magazine_type
* mag
/* drain from the top of the magazine downwards */
642 for (; mag
->m_mag_used
> 0; --mag
->m_mag_used
)
644 void * obj
= mag
->m_objects
[mag
->m_mag_used
- 1];
/* clear the slot so the entry is not seen twice */
645 mag
->m_objects
[mag
->m_mag_used
- 1] = 0;
647 /* DEBUG ONLY: mark cached object allocated, undefined */
648 VALGRIND_MEMPOOL_ALLOC(cache
, obj
, cache
->m_type_size
);
649 if (cache
->m_destructor
!= 0)
651 /* DEBUG ONLY: keep constructed object defined */
652 VALGRIND_MAKE_MEM_DEFINED(obj
, cache
->m_type_size
);
654 /* destruct object */
655 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
658 /* return buffer to slab layer */
659 rtl_cache_slab_free (cache
, obj
);
663 /* ================================================================= */
665 /** rtl_cache_depot_enqueue()
667 * @precond cache->m_depot_lock acquired.
 * Push 'mag' onto the depot's singly linked magazine stack and
 * bump the depot's magazine count.
 */
669 static RTL_MEMORY_INLINE
void
670 rtl_cache_depot_enqueue (
671 rtl_cache_depot_type
* depot
,
672 rtl_cache_magazine_type
* mag
675 /* enqueue empty magazine */
676 mag
->m_mag_next
= depot
->m_mag_next
;
677 depot
->m_mag_next
= mag
;
679 /* update depot stats */
680 depot
->m_mag_count
++;
683 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
684 #pragma inline(rtl_cache_depot_enqueue)
685 #endif /* __SUNPRO_C */
688 /** rtl_cache_depot_dequeue()
690 * @precond cache->m_depot_lock acquired.
 * Pop one magazine off the depot stack, if any: unlink it, decrement
 * the count, and track the working-set minimum (m_curr_min) used by
 * the wsupdate purge logic. Returns the magazine, or 0 when the
 * depot is empty.
 * NOTE(review): extraction artifact — the return statement and
 * braces are missing from this view (embedded numbering gaps).
 */
692 static RTL_MEMORY_INLINE rtl_cache_magazine_type
*
693 rtl_cache_depot_dequeue (
694 rtl_cache_depot_type
* depot
697 rtl_cache_magazine_type
* mag
= 0;
698 if (depot
->m_mag_count
> 0)
700 /* dequeue magazine */
701 OSL_ASSERT(depot
->m_mag_next
!= 0);
703 mag
= depot
->m_mag_next
;
704 depot
->m_mag_next
= mag
->m_mag_next
;
707 /* update depot stats */
708 depot
->m_mag_count
--;
709 depot
->m_curr_min
= SAL_MIN(depot
->m_curr_min
, depot
->m_mag_count
);
714 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
715 #pragma inline(rtl_cache_depot_dequeue)
716 #endif /* __SUNPRO_C */
719 /** rtl_cache_depot_exchange_alloc()
721 * @precond cache->m_depot_lock acquired.
 * Allocation-side swap: take a full magazine from m_depot_full and,
 * if both sides are present, park the caller's 'empty' magazine on
 * m_depot_empty. Returns the full magazine (or 0 if none).
 * NOTE(review): extraction artifact — the return statement is
 * missing from this view (embedded numbering gaps).
 */
723 static RTL_MEMORY_INLINE rtl_cache_magazine_type
*
724 rtl_cache_depot_exchange_alloc (
725 rtl_cache_type
* cache
,
726 rtl_cache_magazine_type
* empty
729 rtl_cache_magazine_type
* full
;
731 OSL_ASSERT((empty
== 0) || (empty
->m_mag_used
== 0));
733 /* dequeue full magazine */
734 full
= rtl_cache_depot_dequeue (&(cache
->m_depot_full
));
735 if ((full
!= 0) && (empty
!= 0))
737 /* enqueue empty magazine */
738 rtl_cache_depot_enqueue (&(cache
->m_depot_empty
), empty
);
741 OSL_ASSERT((full
== 0) || (full
->m_mag_used
> 0));
746 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
747 #pragma inline(rtl_cache_depot_exchange_alloc)
748 #endif /* __SUNPRO_C */
751 /** rtl_cache_depot_exchange_free()
753 * @precond cache->m_depot_lock acquired.
 * Free-side swap (mirror of rtl_cache_depot_exchange_alloc): take
 * an empty magazine from m_depot_empty and, if both sides are
 * present, park the caller's 'full' magazine on m_depot_full.
 * Returns the empty magazine (or 0 if none).
 * NOTE(review): extraction artifact — the return statement is
 * missing from this view (embedded numbering gaps).
 */
755 static RTL_MEMORY_INLINE rtl_cache_magazine_type
*
756 rtl_cache_depot_exchange_free (
757 rtl_cache_type
* cache
,
758 rtl_cache_magazine_type
* full
761 rtl_cache_magazine_type
* empty
;
763 OSL_ASSERT((full
== 0) || (full
->m_mag_used
> 0));
765 /* dequeue empty magazine */
766 empty
= rtl_cache_depot_dequeue (&(cache
->m_depot_empty
));
767 if ((empty
!= 0) && (full
!= 0))
769 /* enqueue full magazine */
770 rtl_cache_depot_enqueue (&(cache
->m_depot_full
), full
);
773 OSL_ASSERT((empty
== 0) || (empty
->m_mag_used
== 0));
778 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
779 #pragma inline(rtl_cache_depot_exchange_free)
780 #endif /* __SUNPRO_C */
783 /** rtl_cache_depot_populate()
785 * @precond cache->m_depot_lock acquired.
 * Add one empty magazine to the depot: allocate it from
 * cache->m_magazine_cache (dropping the depot lock around the
 * allocation) and enqueue it on m_depot_empty.
 * NOTE(review): extraction artifact — the null check on the
 * allocation result and the return statement are missing from this
 * view (embedded numbering gaps).
 */
788 rtl_cache_depot_populate (
789 rtl_cache_type
* cache
792 rtl_cache_magazine_type
* empty
= 0;
794 if (cache
->m_magazine_cache
!= 0)
796 /* allocate new empty magazine */
797 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
798 empty
= (rtl_cache_magazine_type
*)rtl_cache_alloc (cache
->m_magazine_cache
);
799 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
802 /* enqueue (new) empty magazine */
803 rtl_cache_depot_enqueue (&(cache
->m_depot_empty
), empty
);
809 /* ================================================================= */
811 /** rtl_cache_constructor()
 * Bring a raw rtl_cache_type struct to its default state: zero it,
 * then initialize the locks, queues and the embedded hash table.
 */
814 rtl_cache_constructor (void * obj
)
816 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
818 memset (cache
, 0, sizeof(rtl_cache_type
));
/* cache-list linkage (g_cache_list) */
821 QUEUE_START_NAMED(cache
, cache_
);
/* slab layer */
824 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_slab_lock
));
826 QUEUE_START_NAMED(&(cache
->m_free_head
), slab_
);
827 QUEUE_START_NAMED(&(cache
->m_used_head
), slab_
);
/* start with the embedded (static) hash table */
829 cache
->m_hash_table
= cache
->m_hash_table_0
;
830 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
831 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
/* magazine / depot layer */
834 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_depot_lock
));
839 /** rtl_cache_destructor()
 * Tear down a cache struct: assert it is back in its default state
 * (unlinked, empty queues, embedded hash table restored) and
 * destroy its locks. Counterpart of rtl_cache_constructor().
 */
842 rtl_cache_destructor (void * obj
)
844 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
/* must be unlinked from g_cache_list by now */
847 OSL_ASSERT(QUEUE_STARTED_NAMED(cache
, cache_
));
850 (void)RTL_MEMORY_LOCK_DESTROY(&(cache
->m_slab_lock
));
/* no slabs may remain on either queue */
852 OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache
->m_free_head
), slab_
));
853 OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache
->m_used_head
), slab_
));
/* hash table must have been reset to the embedded one */
855 OSL_ASSERT(cache
->m_hash_table
== cache
->m_hash_table_0
);
856 OSL_ASSERT(cache
->m_hash_size
== RTL_CACHE_HASH_SIZE
);
857 OSL_ASSERT(cache
->m_hash_shift
== (sal_Size
)(highbit(cache
->m_hash_size
) - 1));
860 (void)RTL_MEMORY_LOCK_DESTROY(&(cache
->m_depot_lock
));
863 /* ================================================================= */
865 /** rtl_cache_activate()
867 static rtl_cache_type
*
869 rtl_cache_type
* cache
,
873 int (SAL_CALL
* constructor
)(void * obj
, void * userarg
),
874 void (SAL_CALL
* destructor
) (void * obj
, void * userarg
),
875 void (SAL_CALL
* reclaim
) (void * userarg
),
877 rtl_arena_type
* source
,
881 OSL_ASSERT(cache
!= 0);
886 snprintf (cache
->m_name
, sizeof(cache
->m_name
), "%s", name
);
888 /* ensure minimum size (embedded bufctl linkage) */
889 objsize
= SAL_MAX(objsize
, sizeof(rtl_cache_bufctl_type
*));
893 /* determine default alignment */
894 if (objsize
>= RTL_MEMORY_ALIGNMENT_8
)
895 objalign
= RTL_MEMORY_ALIGNMENT_8
;
897 objalign
= RTL_MEMORY_ALIGNMENT_4
;
901 /* ensure minimum alignment */
902 objalign
= SAL_MAX(objalign
, RTL_MEMORY_ALIGNMENT_4
);
904 OSL_ASSERT(RTL_MEMORY_ISP2(objalign
));
906 cache
->m_type_size
= objsize
= RTL_MEMORY_P2ROUNDUP(objsize
, objalign
);
907 cache
->m_type_align
= objalign
;
908 cache
->m_type_shift
= highbit(cache
->m_type_size
) - 1;
910 cache
->m_constructor
= constructor
;
911 cache
->m_destructor
= destructor
;
912 cache
->m_reclaim
= reclaim
;
913 cache
->m_userarg
= userarg
;
916 cache
->m_source
= source
;
918 slabsize
= source
->m_quantum
; /* minimum slab size */
919 if (flags
& RTL_CACHE_FLAG_QUANTUMCACHE
)
921 /* next power of 2 above 3 * qcache_max */
922 slabsize
= SAL_MAX(slabsize
, (1UL << highbit(3 * source
->m_qcache_max
)));
926 /* waste at most 1/8 of slab */
927 slabsize
= SAL_MAX(slabsize
, cache
->m_type_size
* 8);
930 slabsize
= RTL_MEMORY_P2ROUNDUP(slabsize
, source
->m_quantum
);
931 if (!RTL_MEMORY_ISP2(slabsize
))
932 slabsize
= 1UL << highbit(slabsize
);
933 cache
->m_slab_size
= slabsize
;
935 if (cache
->m_slab_size
> source
->m_quantum
)
937 OSL_ASSERT(gp_cache_slab_cache
!= 0);
938 OSL_ASSERT(gp_cache_bufctl_cache
!= 0);
940 cache
->m_features
|= RTL_CACHE_FEATURE_HASH
;
941 cache
->m_ntypes
= cache
->m_slab_size
/ cache
->m_type_size
;
942 cache
->m_ncolor_max
= cache
->m_slab_size
% cache
->m_type_size
;
946 /* embedded slab struct */
947 cache
->m_ntypes
= (cache
->m_slab_size
- sizeof(rtl_cache_slab_type
)) / cache
->m_type_size
;
948 cache
->m_ncolor_max
= (cache
->m_slab_size
- sizeof(rtl_cache_slab_type
)) % cache
->m_type_size
;
951 OSL_ASSERT(cache
->m_ntypes
> 0);
954 if (flags
& RTL_CACHE_FLAG_BULKDESTROY
)
956 /* allow bulk slab delete upon cache deactivation */
957 cache
->m_features
|= RTL_CACHE_FEATURE_BULKDESTROY
;
961 if (!(flags
& RTL_CACHE_FLAG_NOMAGAZINE
))
963 OSL_ASSERT(gp_cache_magazine_cache
!= 0);
964 cache
->m_magazine_cache
= gp_cache_magazine_cache
;
967 /* insert into cache list */
968 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
969 QUEUE_INSERT_TAIL_NAMED(&(g_cache_list
.m_cache_head
), cache
, cache_
);
970 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
975 /** rtl_cache_deactivate()
978 rtl_cache_deactivate (
979 rtl_cache_type
* cache
984 /* remove from cache list */
985 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
986 active
= QUEUE_STARTED_NAMED(cache
, cache_
) == 0;
987 QUEUE_REMOVE_NAMED(cache
, cache_
);
988 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
990 OSL_PRECOND(active
, "rtl_cache_deactivate(): orphaned cache.");
992 /* cleanup magazine layer */
993 if (cache
->m_magazine_cache
!= 0)
995 rtl_cache_type
* mag_cache
;
996 rtl_cache_magazine_type
* mag
;
998 /* prevent recursion */
999 mag_cache
= cache
->m_magazine_cache
, cache
->m_magazine_cache
= 0;
1001 /* cleanup cpu layer */
1002 if ((mag
= cache
->m_cpu_curr
) != 0)
1004 cache
->m_cpu_curr
= 0;
1005 rtl_cache_magazine_clear (cache
, mag
);
1006 rtl_cache_free (mag_cache
, mag
);
1008 if ((mag
= cache
->m_cpu_prev
) != 0)
1010 cache
->m_cpu_prev
= 0;
1011 rtl_cache_magazine_clear (cache
, mag
);
1012 rtl_cache_free (mag_cache
, mag
);
1015 /* cleanup depot layer */
1016 while ((mag
= rtl_cache_depot_dequeue(&(cache
->m_depot_full
))) != 0)
1018 rtl_cache_magazine_clear (cache
, mag
);
1019 rtl_cache_free (mag_cache
, mag
);
1021 while ((mag
= rtl_cache_depot_dequeue(&(cache
->m_depot_empty
))) != 0)
1023 rtl_cache_magazine_clear (cache
, mag
);
1024 rtl_cache_free (mag_cache
, mag
);
1029 "rtl_cache_deactivate(\"%s\"): "
1030 "[slab]: allocs: %"PRIu64
", frees: %"PRIu64
"; total: %lu, used: %lu; "
1031 "[cpu]: allocs: %"PRIu64
", frees: %"PRIu64
"; "
1032 "[total]: allocs: %"PRIu64
", frees: %"PRIu64
"",
1034 cache
->m_slab_stats
.m_alloc
, cache
->m_slab_stats
.m_free
,
1035 cache
->m_slab_stats
.m_mem_total
, cache
->m_slab_stats
.m_mem_alloc
,
1036 cache
->m_cpu_stats
.m_alloc
, cache
->m_cpu_stats
.m_free
,
1037 cache
->m_slab_stats
.m_alloc
+ cache
->m_cpu_stats
.m_alloc
,
1038 cache
->m_slab_stats
.m_free
+ cache
->m_cpu_stats
.m_free
1041 /* cleanup slab layer */
1042 if (cache
->m_slab_stats
.m_alloc
> cache
->m_slab_stats
.m_free
)
1045 "rtl_cache_deactivate(\"%s\"): "
1046 "cleaning up %"PRIu64
" leaked buffer(s) [%lu bytes] [%lu total]",
1048 cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
,
1049 cache
->m_slab_stats
.m_mem_alloc
, cache
->m_slab_stats
.m_mem_total
1052 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
1054 /* cleanup bufctl(s) for leaking buffer(s) */
1055 sal_Size i
, n
= cache
->m_hash_size
;
1056 for (i
= 0; i
< n
; i
++)
1058 rtl_cache_bufctl_type
* bufctl
;
1059 while ((bufctl
= cache
->m_hash_table
[i
]) != 0)
1061 /* pop from hash table */
1062 cache
->m_hash_table
[i
] = bufctl
->m_next
, bufctl
->m_next
= 0;
1064 /* return to bufctl cache */
1065 rtl_cache_free (gp_cache_bufctl_cache
, bufctl
);
1070 /* force cleanup of remaining slabs */
1071 rtl_cache_slab_type
*head
, *slab
;
1073 head
= &(cache
->m_used_head
);
1074 for (slab
= head
->m_slab_next
; slab
!= head
; slab
= head
->m_slab_next
)
1076 /* remove from 'used' queue */
1077 QUEUE_REMOVE_NAMED(slab
, slab_
);
1080 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
1083 rtl_cache_slab_destroy (cache
, slab
);
1086 head
= &(cache
->m_free_head
);
1087 for (slab
= head
->m_slab_next
; slab
!= head
; slab
= head
->m_slab_next
)
1089 /* remove from 'free' queue */
1090 QUEUE_REMOVE_NAMED(slab
, slab_
);
1093 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
1096 rtl_cache_slab_destroy (cache
, slab
);
1101 if (cache
->m_hash_table
!= cache
->m_hash_table_0
)
1105 cache
->m_hash_table
,
1106 cache
->m_hash_size
* sizeof(rtl_cache_bufctl_type
*));
1108 cache
->m_hash_table
= cache
->m_hash_table_0
;
1109 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
1110 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
1114 /* ================================================================= *
1116 * cache implementation.
1118 * ================================================================= */
1120 /** rtl_cache_create()
1123 SAL_CALL
rtl_cache_create (
1127 int (SAL_CALL
* constructor
)(void * obj
, void * userarg
),
1128 void (SAL_CALL
* destructor
) (void * obj
, void * userarg
),
1129 void (SAL_CALL
* reclaim
) (void * userarg
),
1131 rtl_arena_type
* source
,
1133 ) SAL_THROW_EXTERN_C()
1135 rtl_cache_type
* result
= 0;
1136 sal_Size size
= sizeof(rtl_cache_type
);
1139 result
= (rtl_cache_type
*)rtl_arena_alloc (gp_cache_arena
, &size
);
1142 rtl_cache_type
* cache
= result
;
1143 VALGRIND_CREATE_MEMPOOL(cache
, 0, 0);
1144 (void) rtl_cache_constructor (cache
);
1148 /* use default arena */
1149 OSL_ASSERT(gp_default_arena
!= 0);
1150 source
= gp_default_arena
;
1153 result
= rtl_cache_activate (
1168 /* activation failed */
1169 rtl_cache_deactivate (cache
);
1170 rtl_cache_destructor (cache
);
1171 VALGRIND_DESTROY_MEMPOOL(cache
);
1172 rtl_arena_free (gp_cache_arena
, cache
, size
);
1175 else if (gp_cache_arena
== 0)
1177 if (rtl_cache_init())
1186 /** rtl_cache_destroy()
 * Public API: deactivate 'cache' (unlink it and flush its magazine
 * and slab layers), destruct the struct, and return its memory to
 * gp_cache_arena.
 * NOTE(review): extraction artifact — the null check on 'cache'
 * guarding this body is missing from this view (embedded numbering
 * gaps).
 */
1188 void SAL_CALL
rtl_cache_destroy (
1189 rtl_cache_type
* cache
1190 ) SAL_THROW_EXTERN_C()
1194 rtl_cache_deactivate (cache
);
1195 rtl_cache_destructor (cache
);
1196 VALGRIND_DESTROY_MEMPOOL(cache
);
1197 rtl_arena_free (gp_cache_arena
, cache
, sizeof(rtl_cache_type
));
1201 /** rtl_cache_alloc()
1204 SAL_CALL
rtl_cache_alloc (
1205 rtl_cache_type
* cache
1206 ) SAL_THROW_EXTERN_C()
1213 if (cache
->m_cpu_curr
!= 0)
1215 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1219 /* take object from magazine layer */
1220 rtl_cache_magazine_type
*curr
, *prev
, *temp
;
1222 curr
= cache
->m_cpu_curr
;
1223 if ((curr
!= 0) && (curr
->m_mag_used
> 0))
1225 obj
= curr
->m_objects
[--curr
->m_mag_used
];
1226 #if defined(HAVE_VALGRIND_MEMCHECK_H)
1227 VALGRIND_MEMPOOL_ALLOC(cache
, obj
, cache
->m_type_size
);
1228 if (cache
->m_constructor
!= 0)
1230 /* keep constructed object defined */
1231 VALGRIND_MAKE_MEM_DEFINED(obj
, cache
->m_type_size
);
1233 #endif /* HAVE_VALGRIND_MEMCHECK_H */
1234 cache
->m_cpu_stats
.m_alloc
+= 1;
1235 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1240 prev
= cache
->m_cpu_prev
;
1241 if ((prev
!= 0) && (prev
->m_mag_used
> 0))
1243 temp
= cache
->m_cpu_curr
;
1244 cache
->m_cpu_curr
= cache
->m_cpu_prev
;
1245 cache
->m_cpu_prev
= temp
;
1250 temp
= rtl_cache_depot_exchange_alloc (cache
, prev
);
1253 cache
->m_cpu_prev
= cache
->m_cpu_curr
;
1254 cache
->m_cpu_curr
= temp
;
1259 /* no full magazine: fall through to slab layer */
1263 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1266 /* alloc buffer from slab layer */
1267 obj
= rtl_cache_slab_alloc (cache
);
1268 if ((obj
!= 0) && (cache
->m_constructor
!= 0))
1270 /* construct object */
1271 if (!((cache
->m_constructor
)(obj
, cache
->m_userarg
)))
1273 /* construction failure */
1274 rtl_cache_slab_free (cache
, obj
), obj
= 0;
1280 /** rtl_cache_free()
1283 SAL_CALL
rtl_cache_free (
1284 rtl_cache_type
* cache
,
1286 ) SAL_THROW_EXTERN_C()
1288 if ((obj
!= 0) && (cache
!= 0))
1290 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1294 /* return object to magazine layer */
1295 rtl_cache_magazine_type
*curr
, *prev
, *temp
;
1297 curr
= cache
->m_cpu_curr
;
1298 if ((curr
!= 0) && (curr
->m_mag_used
< curr
->m_mag_size
))
1300 curr
->m_objects
[curr
->m_mag_used
++] = obj
;
1301 #if defined(HAVE_VALGRIND_MEMCHECK_H)
1302 VALGRIND_MEMPOOL_FREE(cache
, obj
);
1303 #endif /* HAVE_VALGRIND_MEMCHECK_H */
1304 cache
->m_cpu_stats
.m_free
+= 1;
1305 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1310 prev
= cache
->m_cpu_prev
;
1311 if ((prev
!= 0) && (prev
->m_mag_used
== 0))
1313 temp
= cache
->m_cpu_curr
;
1314 cache
->m_cpu_curr
= cache
->m_cpu_prev
;
1315 cache
->m_cpu_prev
= temp
;
1320 temp
= rtl_cache_depot_exchange_free (cache
, prev
);
1323 cache
->m_cpu_prev
= cache
->m_cpu_curr
;
1324 cache
->m_cpu_curr
= temp
;
1329 if (rtl_cache_depot_populate(cache
) != 0)
1334 /* no empty magazine: fall through to slab layer */
1338 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1340 /* no space for constructed object in magazine layer */
1341 if (cache
->m_destructor
!= 0)
1343 /* destruct object */
1344 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
1347 /* return buffer to slab layer */
1348 rtl_cache_slab_free (cache
, obj
);
1352 /* ================================================================= *
1354 * cache wsupdate (machdep) internals.
1356 * ================================================================= */
1358 /** rtl_cache_wsupdate_init()
1360 * @precond g_cache_list.m_lock initialized
1363 rtl_cache_wsupdate_init (void);
1366 /** rtl_cache_wsupdate_wait()
1368 * @precond g_cache_list.m_lock acquired
1371 rtl_cache_wsupdate_wait (
1372 unsigned int seconds
1375 /** rtl_cache_wsupdate_fini()
1379 rtl_cache_wsupdate_fini (void);
1381 /* ================================================================= */
1383 #if defined(SAL_UNX) || defined(SAL_OS2)
1385 #include <sys/time.h>
1388 rtl_cache_wsupdate_all (void * arg
);
1391 rtl_cache_wsupdate_init (void)
1393 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1394 g_cache_list
.m_update_done
= 0;
1395 (void) pthread_cond_init (&(g_cache_list
.m_update_cond
), NULL
);
1396 if (pthread_create (
1397 &(g_cache_list
.m_update_thread
), NULL
, rtl_cache_wsupdate_all
, (void*)(10)) != 0)
1400 g_cache_list
.m_update_thread
= (pthread_t
)(0);
1402 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1406 rtl_cache_wsupdate_wait (unsigned int seconds
)
1411 struct timespec wakeup
;
1413 gettimeofday(&now
, 0);
1414 wakeup
.tv_sec
= now
.tv_sec
+ (seconds
);
1415 wakeup
.tv_nsec
= now
.tv_usec
* 1000;
1417 (void) pthread_cond_timedwait (
1418 &(g_cache_list
.m_update_cond
),
1419 &(g_cache_list
.m_lock
),
1425 rtl_cache_wsupdate_fini (void)
1427 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1428 g_cache_list
.m_update_done
= 1;
1429 pthread_cond_signal (&(g_cache_list
.m_update_cond
));
1430 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1432 if (g_cache_list
.m_update_thread
!= (pthread_t
)(0))
1433 pthread_join (g_cache_list
.m_update_thread
, NULL
);
1436 /* ================================================================= */
1438 #elif defined(SAL_W32)
1441 rtl_cache_wsupdate_all (void * arg
);
1444 rtl_cache_wsupdate_init (void)
1448 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1449 g_cache_list
.m_update_done
= 0;
1450 g_cache_list
.m_update_cond
= CreateEvent (0, TRUE
, FALSE
, 0);
1452 g_cache_list
.m_update_thread
=
1453 CreateThread (NULL
, 0, rtl_cache_wsupdate_all
, (LPVOID
)(10), 0, &dwThreadId
);
1454 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1458 rtl_cache_wsupdate_wait (unsigned int seconds
)
1462 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1463 WaitForSingleObject (g_cache_list
.m_update_cond
, (DWORD
)(seconds
* 1000));
1464 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1469 rtl_cache_wsupdate_fini (void)
1471 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1472 g_cache_list
.m_update_done
= 1;
1473 SetEvent (g_cache_list
.m_update_cond
);
1474 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1476 WaitForSingleObject (g_cache_list
.m_update_thread
, INFINITE
);
1479 #endif /* SAL_UNX || SAL_W32 */
1481 /* ================================================================= */
1483 /** rtl_cache_depot_wsupdate()
1484 * update depot stats and purge excess magazines.
1486 * @precond cache->m_depot_lock acquired
1489 rtl_cache_depot_wsupdate (
1490 rtl_cache_type
* cache
,
1491 rtl_cache_depot_type
* depot
1496 depot
->m_prev_min
= depot
->m_curr_min
;
1497 depot
->m_curr_min
= depot
->m_mag_count
;
1499 npurge
= SAL_MIN(depot
->m_curr_min
, depot
->m_prev_min
);
1500 for (; npurge
> 0; npurge
--)
1502 rtl_cache_magazine_type
* mag
= rtl_cache_depot_dequeue (depot
);
1505 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1506 rtl_cache_magazine_clear (cache
, mag
);
1507 rtl_cache_free (cache
->m_magazine_cache
, mag
);
1508 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1513 /** rtl_cache_wsupdate()
1515 * @precond cache->m_depot_lock released
1518 rtl_cache_wsupdate (
1519 rtl_cache_type
* cache
1522 if (cache
->m_magazine_cache
!= 0)
1524 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1527 "rtl_cache_wsupdate(\"%s\") "
1528 "[depot: count, curr_min, prev_min] "
1529 "full: %lu, %lu, %lu; empty: %lu, %lu, %lu",
1531 cache
->m_depot_full
.m_mag_count
,
1532 cache
->m_depot_full
.m_curr_min
,
1533 cache
->m_depot_full
.m_prev_min
,
1534 cache
->m_depot_empty
.m_mag_count
,
1535 cache
->m_depot_empty
.m_curr_min
,
1536 cache
->m_depot_empty
.m_prev_min
1539 rtl_cache_depot_wsupdate (cache
, &(cache
->m_depot_full
));
1540 rtl_cache_depot_wsupdate (cache
, &(cache
->m_depot_empty
));
1542 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1546 /** rtl_cache_wsupdate_all()
1549 #if defined(SAL_UNX) || defined(SAL_OS2)
1551 #elif defined(SAL_W32)
1553 #endif /* SAL_UNX || SAL_W32 */
1554 rtl_cache_wsupdate_all (void * arg
)
1556 unsigned int seconds
= (unsigned int)SAL_INT_CAST(sal_uIntPtr
, arg
);
1558 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1559 while (!g_cache_list
.m_update_done
)
1561 rtl_cache_wsupdate_wait (seconds
);
1562 if (!g_cache_list
.m_update_done
)
1564 rtl_cache_type
* head
, * cache
;
1566 head
= &(g_cache_list
.m_cache_head
);
1567 for (cache
= head
->m_cache_next
;
1569 cache
= cache
->m_cache_next
)
1571 rtl_cache_wsupdate (cache
);
1575 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1580 /* ================================================================= *
1582 * cache initialization.
1584 * ================================================================= */
1587 rtl_cache_once_init (void)
1590 /* list of caches */
1591 RTL_MEMORY_LOCK_INIT(&(g_cache_list
.m_lock
));
1592 (void) rtl_cache_constructor (&(g_cache_list
.m_cache_head
));
1595 /* cache: internal arena */
1596 OSL_ASSERT(gp_cache_arena
== 0);
1598 gp_cache_arena
= rtl_arena_create (
1599 "rtl_cache_internal_arena",
1601 0, /* no quantum caching */
1602 NULL
, /* default source */
1607 OSL_ASSERT(gp_cache_arena
!= 0);
1609 /* check 'gp_default_arena' initialization */
1610 OSL_ASSERT(gp_default_arena
!= 0);
1613 /* cache: magazine cache */
1614 static rtl_cache_type g_cache_magazine_cache
;
1616 OSL_ASSERT(gp_cache_magazine_cache
== 0);
1617 VALGRIND_CREATE_MEMPOOL(&g_cache_magazine_cache
, 0, 0);
1618 (void) rtl_cache_constructor (&g_cache_magazine_cache
);
1620 gp_cache_magazine_cache
= rtl_cache_activate (
1621 &g_cache_magazine_cache
,
1622 "rtl_cache_magazine_cache",
1623 sizeof(rtl_cache_magazine_type
), /* objsize */
1625 rtl_cache_magazine_constructor
,
1626 rtl_cache_magazine_destructor
,
1628 0, /* userarg: NYI */
1629 gp_default_arena
, /* source */
1630 RTL_CACHE_FLAG_NOMAGAZINE
/* during bootstrap; activated below */
1632 OSL_ASSERT(gp_cache_magazine_cache
!= 0);
1634 /* activate magazine layer */
1635 g_cache_magazine_cache
.m_magazine_cache
= gp_cache_magazine_cache
;
1638 /* cache: slab (struct) cache */
1639 static rtl_cache_type g_cache_slab_cache
;
1641 OSL_ASSERT(gp_cache_slab_cache
== 0);
1642 VALGRIND_CREATE_MEMPOOL(&g_cache_slab_cache
, 0, 0);
1643 (void) rtl_cache_constructor (&g_cache_slab_cache
);
1645 gp_cache_slab_cache
= rtl_cache_activate (
1646 &g_cache_slab_cache
,
1647 "rtl_cache_slab_cache",
1648 sizeof(rtl_cache_slab_type
), /* objsize */
1650 rtl_cache_slab_constructor
,
1651 rtl_cache_slab_destructor
,
1653 0, /* userarg: none */
1654 gp_default_arena
, /* source */
1657 OSL_ASSERT(gp_cache_slab_cache
!= 0);
1660 /* cache: bufctl cache */
1661 static rtl_cache_type g_cache_bufctl_cache
;
1663 OSL_ASSERT(gp_cache_bufctl_cache
== 0);
1664 VALGRIND_CREATE_MEMPOOL(&g_cache_bufctl_cache
, 0, 0);
1665 (void) rtl_cache_constructor (&g_cache_bufctl_cache
);
1667 gp_cache_bufctl_cache
= rtl_cache_activate (
1668 &g_cache_bufctl_cache
,
1669 "rtl_cache_bufctl_cache",
1670 sizeof(rtl_cache_bufctl_type
), /* objsize */
1672 0, /* constructor */
1676 gp_default_arena
, /* source */
1679 OSL_ASSERT(gp_cache_bufctl_cache
!= 0);
1682 rtl_cache_wsupdate_init();
1686 rtl_cache_init (void)
1688 static sal_once_type g_once
= SAL_ONCE_INIT
;
1689 SAL_ONCE(&g_once
, rtl_cache_once_init
);
1690 return (gp_cache_arena
!= 0);
/* ================================================================= */

/*
  Issue http://udk.openoffice.org/issues/show_bug.cgi?id=92388

  Mac OS X does not seem to support "__cxa__atexit", thus leading
  to the situation that "__attribute__((destructor))__" functions
  (in particular "rtl_{memory|cache|arena}_fini") become called
  _before_ global C++ object d'tors.

  Delegated the call to "rtl_cache_fini()" into a dummy C++ object,
  see alloc_fini.cxx .
*/
#if defined(__GNUC__) && !defined(MACOSX)
static void rtl_cache_fini (void) __attribute__((destructor));
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma fini(rtl_cache_fini)
static void rtl_cache_fini (void);
#endif /* __GNUC__ || __SUNPRO_C */
1714 rtl_cache_fini (void)
1716 if (gp_cache_arena
!= 0)
1718 rtl_cache_type
* cache
, * head
;
1720 rtl_cache_wsupdate_fini();
1722 if (gp_cache_bufctl_cache
!= 0)
1724 cache
= gp_cache_bufctl_cache
, gp_cache_bufctl_cache
= 0;
1725 rtl_cache_deactivate (cache
);
1726 rtl_cache_destructor (cache
);
1727 VALGRIND_DESTROY_MEMPOOL(cache
);
1729 if (gp_cache_slab_cache
!= 0)
1731 cache
= gp_cache_slab_cache
, gp_cache_slab_cache
= 0;
1732 rtl_cache_deactivate (cache
);
1733 rtl_cache_destructor (cache
);
1734 VALGRIND_DESTROY_MEMPOOL(cache
);
1736 if (gp_cache_magazine_cache
!= 0)
1738 cache
= gp_cache_magazine_cache
, gp_cache_magazine_cache
= 0;
1739 rtl_cache_deactivate (cache
);
1740 rtl_cache_destructor (cache
);
1741 VALGRIND_DESTROY_MEMPOOL(cache
);
1743 if (gp_cache_arena
!= 0)
1745 rtl_arena_destroy (gp_cache_arena
);
1749 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1750 head
= &(g_cache_list
.m_cache_head
);
1751 for (cache
= head
->m_cache_next
; cache
!= head
; cache
= cache
->m_cache_next
)
1754 "rtl_cache_fini(\"%s\") "
1755 "[slab]: allocs: %"PRIu64
", frees: %"PRIu64
"; total: %lu, used: %lu; "
1756 "[cpu]: allocs: %"PRIu64
", frees: %"PRIu64
"; "
1757 "[total]: allocs: %"PRIu64
", frees: %"PRIu64
"",
1759 cache
->m_slab_stats
.m_alloc
, cache
->m_slab_stats
.m_free
,
1760 cache
->m_slab_stats
.m_mem_total
, cache
->m_slab_stats
.m_mem_alloc
,
1761 cache
->m_cpu_stats
.m_alloc
, cache
->m_cpu_stats
.m_free
,
1762 cache
->m_slab_stats
.m_alloc
+ cache
->m_cpu_stats
.m_alloc
,
1763 cache
->m_slab_stats
.m_free
+ cache
->m_cpu_stats
.m_free
1766 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1770 /* ================================================================= */