/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */
20 #include "alloc_cache.hxx"
21 #include "alloc_impl.hxx"
22 #include "alloc_arena.hxx"
23 #include "internal/rtllifecycle.h"
24 #include "sal/macros.h"
25 #include "osl/diagnose.h"
26 #include <osl/thread.hxx>
32 extern AllocMode alloc_mode
;
34 /* ================================================================= *
38 * ================================================================= */
43 struct rtl_cache_list_st
45 rtl_memory_lock_type m_lock
;
46 rtl_cache_type m_cache_head
;
49 pthread_t m_update_thread
;
50 pthread_cond_t m_update_cond
;
51 #elif defined(SAL_W32)
52 HANDLE m_update_thread
;
54 #endif /* SAL_UNX || SAL_W32 */
58 static rtl_cache_list_st g_cache_list
;
61 * provided for cache_type allocations, and hash_table resizing.
65 static rtl_arena_type
* gp_cache_arena
= 0;
67 /** gp_cache_magazine_cache
70 static rtl_cache_type
* gp_cache_magazine_cache
= 0;
72 /** gp_cache_slab_cache
75 static rtl_cache_type
* gp_cache_slab_cache
= 0;
77 /** gp_cache_bufctl_cache
80 static rtl_cache_type
* gp_cache_bufctl_cache
= 0;
82 /* ================================================================= */
84 /** RTL_CACHE_HASH_INDEX()
86 #define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
87 ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))
89 #define RTL_CACHE_HASH_INDEX(cache, addr) \
90 RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
96 rtl_cache_hash_rescale (
97 rtl_cache_type
* cache
,
101 rtl_cache_bufctl_type
** new_table
;
104 new_bytes
= new_size
* sizeof(rtl_cache_bufctl_type
*);
105 new_table
= (rtl_cache_bufctl_type
**)rtl_arena_alloc(gp_cache_arena
, &new_bytes
);
109 rtl_cache_bufctl_type
** old_table
;
110 sal_Size old_size
, i
;
112 memset (new_table
, 0, new_bytes
);
114 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
116 old_table
= cache
->m_hash_table
;
117 old_size
= cache
->m_hash_size
;
121 // "rtl_cache_hash_rescale(" << cache->m_name << "): nbuf: "
122 // << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
124 // << ((cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
125 // >> cache->m_hash_shift)
126 // << "), frees: " << cache->m_slab_stats.m_free << " [old_size: "
127 // << old_size << ", new_size: " << new_size << ']');
129 cache
->m_hash_table
= new_table
;
130 cache
->m_hash_size
= new_size
;
131 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
133 for (i
= 0; i
< old_size
; i
++)
135 rtl_cache_bufctl_type
* curr
= old_table
[i
];
138 rtl_cache_bufctl_type
* next
= curr
->m_next
;
139 rtl_cache_bufctl_type
** head
;
141 head
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, curr
->m_addr
)]);
142 curr
->m_next
= (*head
);
150 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
152 if (old_table
!= cache
->m_hash_table_0
)
154 sal_Size old_bytes
= old_size
* sizeof(rtl_cache_bufctl_type
*);
155 rtl_arena_free (gp_cache_arena
, old_table
, old_bytes
);
161 rtl_cache_hash_insert (
162 rtl_cache_type
* cache
,
163 rtl_cache_bufctl_type
* bufctl
166 rtl_cache_bufctl_type
** ppHead
;
168 ppHead
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, bufctl
->m_addr
)]);
170 bufctl
->m_next
= (*ppHead
);
173 return (bufctl
->m_addr
);
176 /** rtl_cache_hash_remove()
178 rtl_cache_bufctl_type
*
179 rtl_cache_hash_remove (
180 rtl_cache_type
* cache
,
184 rtl_cache_bufctl_type
** ppHead
;
185 rtl_cache_bufctl_type
* bufctl
;
186 sal_Size lookups
= 0;
188 ppHead
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, addr
)]);
189 while ((bufctl
= *ppHead
) != 0)
191 if (bufctl
->m_addr
== addr
)
193 *ppHead
= bufctl
->m_next
, bufctl
->m_next
= 0;
198 ppHead
= &(bufctl
->m_next
);
201 assert(bufctl
!= 0); // bad free
205 sal_Size nbuf
= (sal_Size
)(cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
);
206 if (nbuf
> 4 * cache
->m_hash_size
)
208 if (!(cache
->m_features
& RTL_CACHE_FEATURE_RESCALE
))
210 sal_Size ave
= nbuf
>> cache
->m_hash_shift
;
211 sal_Size new_size
= cache
->m_hash_size
<< (highbit(ave
) - 1);
213 cache
->m_features
|= RTL_CACHE_FEATURE_RESCALE
;
214 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
215 rtl_cache_hash_rescale (cache
, new_size
);
216 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
217 cache
->m_features
&= ~RTL_CACHE_FEATURE_RESCALE
;
225 /* ================================================================= */
229 #define RTL_CACHE_SLAB(addr, size) \
230 (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
232 /** rtl_cache_slab_constructor()
235 rtl_cache_slab_constructor (void * obj
, SAL_UNUSED_PARAMETER
void *)
237 rtl_cache_slab_type
* slab
= (rtl_cache_slab_type
*)(obj
);
239 QUEUE_START_NAMED(slab
, slab_
);
245 /** rtl_cache_slab_destructor()
248 rtl_cache_slab_destructor (void * obj
, SAL_UNUSED_PARAMETER
void *)
250 rtl_cache_slab_type
* slab
= static_cast< rtl_cache_slab_type
* >(obj
);
251 assert(QUEUE_STARTED_NAMED(slab
, slab_
)); // assure removed from queue(s)
252 assert(slab
->m_ntypes
== 0); // assure no longer referenced
253 (void) slab
; // avoid warnings
256 /** rtl_cache_slab_create()
258 * @precond cache->m_slab_lock released.
260 rtl_cache_slab_type
*
261 rtl_cache_slab_create (
262 rtl_cache_type
* cache
265 rtl_cache_slab_type
* slab
= 0;
269 size
= cache
->m_slab_size
;
270 addr
= rtl_arena_alloc (cache
->m_source
, &size
);
273 assert(size
>= cache
->m_slab_size
);
275 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
277 /* allocate slab struct from slab cache */
278 assert(cache
!= gp_cache_slab_cache
);
279 slab
= (rtl_cache_slab_type
*)rtl_cache_alloc (gp_cache_slab_cache
);
283 /* construct embedded slab struct */
284 slab
= RTL_CACHE_SLAB(addr
, cache
->m_slab_size
);
285 (void) rtl_cache_slab_constructor (slab
, 0);
289 slab
->m_data
= (sal_uIntPtr
)(addr
);
291 /* dynamic freelist initialization */
292 slab
->m_bp
= slab
->m_data
;
297 rtl_arena_free (cache
->m_source
, addr
, size
);
303 /** rtl_cache_slab_destroy()
305 * @precond cache->m_slab_lock released.
308 rtl_cache_slab_destroy (
309 rtl_cache_type
* cache
,
310 rtl_cache_slab_type
* slab
313 void * addr
= (void*)(slab
->m_data
);
314 sal_Size refcnt
= slab
->m_ntypes
; slab
->m_ntypes
= 0;
316 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
318 /* cleanup bufctl(s) for free buffer(s) */
319 sal_Size ntypes
= (slab
->m_bp
- slab
->m_data
) / cache
->m_type_size
;
320 for (ntypes
-= refcnt
; slab
->m_sp
!= 0; ntypes
--)
322 rtl_cache_bufctl_type
* bufctl
= slab
->m_sp
;
324 /* pop from freelist */
325 slab
->m_sp
= bufctl
->m_next
, bufctl
->m_next
= 0;
327 /* return bufctl struct to bufctl cache */
328 rtl_cache_free (gp_cache_bufctl_cache
, bufctl
);
332 /* return slab struct to slab cache */
333 rtl_cache_free (gp_cache_slab_cache
, slab
);
337 /* destruct embedded slab struct */
338 rtl_cache_slab_destructor (slab
, 0);
341 if ((refcnt
== 0) || (cache
->m_features
& RTL_CACHE_FEATURE_BULKDESTROY
))
344 rtl_arena_free (cache
->m_source
, addr
, cache
->m_slab_size
);
348 /** rtl_cache_slab_populate()
350 * @precond cache->m_slab_lock acquired.
353 rtl_cache_slab_populate (
354 rtl_cache_type
* cache
357 rtl_cache_slab_type
* slab
;
359 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
360 slab
= rtl_cache_slab_create (cache
);
361 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
364 /* update buffer start addr w/ current color */
365 slab
->m_bp
+= cache
->m_ncolor
;
367 /* update color for next slab */
368 cache
->m_ncolor
+= cache
->m_type_align
;
369 if (cache
->m_ncolor
> cache
->m_ncolor_max
)
373 cache
->m_slab_stats
.m_mem_total
+= cache
->m_slab_size
;
375 /* insert onto 'free' queue */
376 QUEUE_INSERT_HEAD_NAMED(&(cache
->m_free_head
), slab
, slab_
);
381 /* ================================================================= */
383 /** rtl_cache_slab_alloc()
385 * Allocate a buffer from slab layer; used by magazine layer.
388 rtl_cache_slab_alloc (
389 rtl_cache_type
* cache
393 rtl_cache_slab_type
* head
;
395 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
397 head
= &(cache
->m_free_head
);
398 if ((head
->m_slab_next
!= head
) || rtl_cache_slab_populate (cache
))
400 rtl_cache_slab_type
* slab
;
401 rtl_cache_bufctl_type
* bufctl
;
403 slab
= head
->m_slab_next
;
404 assert(slab
->m_ntypes
< cache
->m_ntypes
);
408 /* initialize bufctl w/ current 'slab->m_bp' */
409 assert(slab
->m_bp
< slab
->m_data
+ cache
->m_ntypes
* cache
->m_type_size
+ cache
->m_ncolor_max
);
410 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
412 /* allocate bufctl */
413 assert(cache
!= gp_cache_bufctl_cache
);
414 bufctl
= (rtl_cache_bufctl_type
*)rtl_cache_alloc (gp_cache_bufctl_cache
);
418 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
422 bufctl
->m_addr
= slab
->m_bp
;
423 bufctl
->m_slab
= (sal_uIntPtr
)(slab
);
427 /* embedded bufctl */
428 bufctl
= (rtl_cache_bufctl_type
*)(slab
->m_bp
);
432 /* update 'slab->m_bp' to next free buffer */
433 slab
->m_bp
+= cache
->m_type_size
;
435 /* assign bufctl to freelist */
441 slab
->m_sp
= bufctl
->m_next
;
443 /* increment usage, check for full slab */
444 if ((slab
->m_ntypes
+= 1) == cache
->m_ntypes
)
446 /* remove from 'free' queue */
447 QUEUE_REMOVE_NAMED(slab
, slab_
);
449 /* insert onto 'used' queue (tail) */
450 QUEUE_INSERT_TAIL_NAMED(&(cache
->m_used_head
), slab
, slab_
);
454 cache
->m_slab_stats
.m_alloc
+= 1;
455 cache
->m_slab_stats
.m_mem_alloc
+= cache
->m_type_size
;
457 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
458 addr
= (void*)rtl_cache_hash_insert (cache
, bufctl
);
463 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
467 /** rtl_cache_slab_free()
469 * Return a buffer to slab layer; used by magazine layer.
472 rtl_cache_slab_free (
473 rtl_cache_type
* cache
,
477 rtl_cache_bufctl_type
* bufctl
;
478 rtl_cache_slab_type
* slab
;
480 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
482 /* determine slab from addr */
483 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
485 bufctl
= rtl_cache_hash_remove (cache
, (sal_uIntPtr
)(addr
));
486 slab
= (bufctl
!= 0) ? (rtl_cache_slab_type
*)(bufctl
->m_slab
) : 0;
490 /* embedded slab struct */
491 bufctl
= (rtl_cache_bufctl_type
*)(addr
);
492 slab
= RTL_CACHE_SLAB(addr
, cache
->m_slab_size
);
497 /* check for full slab */
498 if (slab
->m_ntypes
== cache
->m_ntypes
)
500 /* remove from 'used' queue */
501 QUEUE_REMOVE_NAMED(slab
, slab_
);
503 /* insert onto 'free' queue (head) */
504 QUEUE_INSERT_HEAD_NAMED(&(cache
->m_free_head
), slab
, slab_
);
508 bufctl
->m_next
= slab
->m_sp
;
512 cache
->m_slab_stats
.m_free
+= 1;
513 cache
->m_slab_stats
.m_mem_alloc
-= cache
->m_type_size
;
515 /* decrement usage, check for empty slab */
516 if ((slab
->m_ntypes
-= 1) == 0)
518 /* remove from 'free' queue */
519 QUEUE_REMOVE_NAMED(slab
, slab_
);
522 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
524 /* free 'empty' slab */
525 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
526 rtl_cache_slab_destroy (cache
, slab
);
531 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
534 /* ================================================================= */
536 /** rtl_cache_magazine_constructor()
539 rtl_cache_magazine_constructor (void * obj
, SAL_UNUSED_PARAMETER
void *)
541 rtl_cache_magazine_type
* mag
= (rtl_cache_magazine_type
*)(obj
);
542 /* @@@ sal_Size size = (sal_Size)(arg); @@@ */
545 mag
->m_mag_size
= RTL_CACHE_MAGAZINE_SIZE
;
551 /** rtl_cache_magazine_destructor()
554 rtl_cache_magazine_destructor (void * obj
, SAL_UNUSED_PARAMETER
void *)
556 rtl_cache_magazine_type
* mag
= static_cast< rtl_cache_magazine_type
* >(
558 assert(mag
->m_mag_next
== 0); // assure removed from queue(s)
559 assert(mag
->m_mag_used
== 0); // assure no longer referenced
560 (void) mag
; // avoid warnings
563 /** rtl_cache_magazine_clear()
566 rtl_cache_magazine_clear (
567 rtl_cache_type
* cache
,
568 rtl_cache_magazine_type
* mag
571 for (; mag
->m_mag_used
> 0; --mag
->m_mag_used
)
573 void * obj
= mag
->m_objects
[mag
->m_mag_used
- 1];
574 mag
->m_objects
[mag
->m_mag_used
- 1] = 0;
576 if (cache
->m_destructor
!= 0)
578 /* destruct object */
579 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
582 /* return buffer to slab layer */
583 rtl_cache_slab_free (cache
, obj
);
587 /* ================================================================= */
589 /** rtl_cache_depot_enqueue()
591 * @precond cache->m_depot_lock acquired.
594 rtl_cache_depot_enqueue (
595 rtl_cache_depot_type
* depot
,
596 rtl_cache_magazine_type
* mag
599 /* enqueue empty magazine */
600 mag
->m_mag_next
= depot
->m_mag_next
;
601 depot
->m_mag_next
= mag
;
603 /* update depot stats */
604 depot
->m_mag_count
++;
607 /** rtl_cache_depot_dequeue()
609 * @precond cache->m_depot_lock acquired.
611 inline rtl_cache_magazine_type
*
612 rtl_cache_depot_dequeue (
613 rtl_cache_depot_type
* depot
616 rtl_cache_magazine_type
* mag
= 0;
617 if (depot
->m_mag_count
> 0)
619 /* dequeue magazine */
620 assert(depot
->m_mag_next
!= 0);
622 mag
= depot
->m_mag_next
;
623 depot
->m_mag_next
= mag
->m_mag_next
;
626 /* update depot stats */
627 depot
->m_mag_count
--;
628 if(depot
->m_curr_min
> depot
->m_mag_count
)
630 depot
->m_curr_min
= depot
->m_mag_count
;
636 /** rtl_cache_depot_exchange_alloc()
638 * @precond cache->m_depot_lock acquired.
640 inline rtl_cache_magazine_type
*
641 rtl_cache_depot_exchange_alloc (
642 rtl_cache_type
* cache
,
643 rtl_cache_magazine_type
* empty
646 rtl_cache_magazine_type
* full
;
648 assert((empty
== 0) || (empty
->m_mag_used
== 0));
650 /* dequeue full magazine */
651 full
= rtl_cache_depot_dequeue (&(cache
->m_depot_full
));
652 if ((full
!= 0) && (empty
!= 0))
654 /* enqueue empty magazine */
655 rtl_cache_depot_enqueue (&(cache
->m_depot_empty
), empty
);
658 assert((full
== 0) || (full
->m_mag_used
> 0));
663 /** rtl_cache_depot_exchange_free()
665 * @precond cache->m_depot_lock acquired.
667 inline rtl_cache_magazine_type
*
668 rtl_cache_depot_exchange_free (
669 rtl_cache_type
* cache
,
670 rtl_cache_magazine_type
* full
673 rtl_cache_magazine_type
* empty
;
675 assert((full
== 0) || (full
->m_mag_used
> 0));
677 /* dequeue empty magazine */
678 empty
= rtl_cache_depot_dequeue (&(cache
->m_depot_empty
));
679 if ((empty
!= 0) && (full
!= 0))
681 /* enqueue full magazine */
682 rtl_cache_depot_enqueue (&(cache
->m_depot_full
), full
);
685 assert((empty
== 0) || (empty
->m_mag_used
== 0));
690 /** rtl_cache_depot_populate()
692 * @precond cache->m_depot_lock acquired.
695 rtl_cache_depot_populate (
696 rtl_cache_type
* cache
699 rtl_cache_magazine_type
* empty
= 0;
701 if (cache
->m_magazine_cache
!= 0)
703 /* allocate new empty magazine */
704 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
705 empty
= (rtl_cache_magazine_type
*)rtl_cache_alloc (cache
->m_magazine_cache
);
706 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
709 /* enqueue (new) empty magazine */
710 rtl_cache_depot_enqueue (&(cache
->m_depot_empty
), empty
);
716 /* ================================================================= */
718 /** rtl_cache_constructor()
721 rtl_cache_constructor (void * obj
)
723 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
725 memset (cache
, 0, sizeof(rtl_cache_type
));
728 QUEUE_START_NAMED(cache
, cache_
);
731 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_slab_lock
));
733 QUEUE_START_NAMED(&(cache
->m_free_head
), slab_
);
734 QUEUE_START_NAMED(&(cache
->m_used_head
), slab_
);
736 cache
->m_hash_table
= cache
->m_hash_table_0
;
737 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
738 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
741 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_depot_lock
));
746 /** rtl_cache_destructor()
749 rtl_cache_destructor (void * obj
)
751 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
754 assert(QUEUE_STARTED_NAMED(cache
, cache_
));
757 (void)RTL_MEMORY_LOCK_DESTROY(&(cache
->m_slab_lock
));
759 assert(QUEUE_STARTED_NAMED(&(cache
->m_free_head
), slab_
));
760 assert(QUEUE_STARTED_NAMED(&(cache
->m_used_head
), slab_
));
762 assert(cache
->m_hash_table
== cache
->m_hash_table_0
);
763 assert(cache
->m_hash_size
== RTL_CACHE_HASH_SIZE
);
764 assert(cache
->m_hash_shift
== (sal_Size
)(highbit(cache
->m_hash_size
) - 1));
767 (void)RTL_MEMORY_LOCK_DESTROY(&(cache
->m_depot_lock
));
770 /* ================================================================= */
772 /** rtl_cache_activate()
776 rtl_cache_type
* cache
,
780 int (SAL_CALL
* constructor
)(void * obj
, void * userarg
),
781 void (SAL_CALL
* destructor
) (void * obj
, void * userarg
),
782 void (SAL_CALL
* reclaim
) (void * userarg
),
784 rtl_arena_type
* source
,
793 snprintf (cache
->m_name
, sizeof(cache
->m_name
), "%s", name
);
795 /* ensure minimum size (embedded bufctl linkage) */
796 if(objsize
< sizeof(rtl_cache_bufctl_type
*))
798 objsize
= sizeof(rtl_cache_bufctl_type
*);
803 /* determine default alignment */
804 if (objsize
>= RTL_MEMORY_ALIGNMENT_8
)
805 objalign
= RTL_MEMORY_ALIGNMENT_8
;
807 objalign
= RTL_MEMORY_ALIGNMENT_4
;
811 /* ensure minimum alignment */
812 if(objalign
< RTL_MEMORY_ALIGNMENT_4
)
814 objalign
= RTL_MEMORY_ALIGNMENT_4
;
817 assert(RTL_MEMORY_ISP2(objalign
));
819 cache
->m_type_size
= objsize
= RTL_MEMORY_P2ROUNDUP(objsize
, objalign
);
820 cache
->m_type_align
= objalign
;
821 cache
->m_type_shift
= highbit(cache
->m_type_size
) - 1;
823 cache
->m_constructor
= constructor
;
824 cache
->m_destructor
= destructor
;
825 cache
->m_reclaim
= reclaim
;
826 cache
->m_userarg
= userarg
;
829 cache
->m_source
= source
;
831 slabsize
= source
->m_quantum
; /* minimum slab size */
832 if (flags
& RTL_CACHE_FLAG_QUANTUMCACHE
)
834 /* next power of 2 above 3 * qcache_max */
835 if(slabsize
< (((sal_Size
)1) << highbit(3 * source
->m_qcache_max
)))
837 slabsize
= (((sal_Size
)1) << highbit(3 * source
->m_qcache_max
));
842 /* waste at most 1/8 of slab */
843 if(slabsize
< cache
->m_type_size
* 8)
845 slabsize
= cache
->m_type_size
* 8;
849 slabsize
= RTL_MEMORY_P2ROUNDUP(slabsize
, source
->m_quantum
);
850 if (!RTL_MEMORY_ISP2(slabsize
))
851 slabsize
= (((sal_Size
)1) << highbit(slabsize
));
852 cache
->m_slab_size
= slabsize
;
854 if (cache
->m_slab_size
> source
->m_quantum
)
856 assert(gp_cache_slab_cache
!= 0);
857 assert(gp_cache_bufctl_cache
!= 0);
859 cache
->m_features
|= RTL_CACHE_FEATURE_HASH
;
860 cache
->m_ntypes
= cache
->m_slab_size
/ cache
->m_type_size
;
861 cache
->m_ncolor_max
= cache
->m_slab_size
% cache
->m_type_size
;
865 /* embedded slab struct */
866 cache
->m_ntypes
= (cache
->m_slab_size
- sizeof(rtl_cache_slab_type
)) / cache
->m_type_size
;
867 cache
->m_ncolor_max
= (cache
->m_slab_size
- sizeof(rtl_cache_slab_type
)) % cache
->m_type_size
;
870 assert(cache
->m_ntypes
> 0);
873 if (flags
& RTL_CACHE_FLAG_BULKDESTROY
)
875 /* allow bulk slab delete upon cache deactivation */
876 cache
->m_features
|= RTL_CACHE_FEATURE_BULKDESTROY
;
880 if (!(flags
& RTL_CACHE_FLAG_NOMAGAZINE
))
882 assert(gp_cache_magazine_cache
!= 0);
883 cache
->m_magazine_cache
= gp_cache_magazine_cache
;
886 /* insert into cache list */
887 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
888 QUEUE_INSERT_TAIL_NAMED(&(g_cache_list
.m_cache_head
), cache
, cache_
);
889 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
894 /** rtl_cache_deactivate()
897 rtl_cache_deactivate (
898 rtl_cache_type
* cache
901 /* remove from cache list */
902 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
903 bool active
= !QUEUE_STARTED_NAMED(cache
, cache_
);
904 QUEUE_REMOVE_NAMED(cache
, cache_
);
905 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
907 assert(active
); // orphaned cache
910 /* cleanup magazine layer */
911 if (cache
->m_magazine_cache
!= 0)
913 rtl_cache_type
* mag_cache
;
914 rtl_cache_magazine_type
* mag
;
916 /* prevent recursion */
917 mag_cache
= cache
->m_magazine_cache
, cache
->m_magazine_cache
= 0;
919 /* cleanup cpu layer */
920 if ((mag
= cache
->m_cpu_curr
) != 0)
922 cache
->m_cpu_curr
= 0;
923 rtl_cache_magazine_clear (cache
, mag
);
924 rtl_cache_free (mag_cache
, mag
);
926 if ((mag
= cache
->m_cpu_prev
) != 0)
928 cache
->m_cpu_prev
= 0;
929 rtl_cache_magazine_clear (cache
, mag
);
930 rtl_cache_free (mag_cache
, mag
);
933 /* cleanup depot layer */
934 while ((mag
= rtl_cache_depot_dequeue(&(cache
->m_depot_full
))) != 0)
936 rtl_cache_magazine_clear (cache
, mag
);
937 rtl_cache_free (mag_cache
, mag
);
939 while ((mag
= rtl_cache_depot_dequeue(&(cache
->m_depot_empty
))) != 0)
941 rtl_cache_magazine_clear (cache
, mag
);
942 rtl_cache_free (mag_cache
, mag
);
948 // "rtl_cache_deactivate(" << cache->m_name << "): [slab]: allocs: "
949 // << cache->m_slab_stats.m_alloc << ", frees: "
950 // << cache->m_slab_stats.m_free << "; total: "
951 // << cache->m_slab_stats.m_mem_total << ", used: "
952 // << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
953 // << cache->m_cpu_stats.m_alloc << ", frees: "
954 // << cache->m_cpu_stats.m_free << "; [total]: allocs: "
955 // << (cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc)
957 // << (cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free));
959 /* cleanup slab layer */
960 if (cache
->m_slab_stats
.m_alloc
> cache
->m_slab_stats
.m_free
)
964 // "rtl_cache_deactivate(" << cache->m_name << "): cleaning up "
965 // << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
966 // << " leaked buffer(s) [" << cache->m_slab_stats.m_mem_alloc
967 // << " bytes] [" << cache->m_slab_stats.m_mem_total << " total]");
969 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
971 /* cleanup bufctl(s) for leaking buffer(s) */
972 sal_Size i
, n
= cache
->m_hash_size
;
973 for (i
= 0; i
< n
; i
++)
975 rtl_cache_bufctl_type
* bufctl
;
976 while ((bufctl
= cache
->m_hash_table
[i
]) != 0)
978 /* pop from hash table */
979 cache
->m_hash_table
[i
] = bufctl
->m_next
, bufctl
->m_next
= 0;
981 /* return to bufctl cache */
982 rtl_cache_free (gp_cache_bufctl_cache
, bufctl
);
987 /* force cleanup of remaining slabs */
988 rtl_cache_slab_type
*head
, *slab
;
990 head
= &(cache
->m_used_head
);
991 for (slab
= head
->m_slab_next
; slab
!= head
; slab
= head
->m_slab_next
)
993 /* remove from 'used' queue */
994 QUEUE_REMOVE_NAMED(slab
, slab_
);
997 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
1000 rtl_cache_slab_destroy (cache
, slab
);
1003 head
= &(cache
->m_free_head
);
1004 for (slab
= head
->m_slab_next
; slab
!= head
; slab
= head
->m_slab_next
)
1006 /* remove from 'free' queue */
1007 QUEUE_REMOVE_NAMED(slab
, slab_
);
1010 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
1013 rtl_cache_slab_destroy (cache
, slab
);
1018 if (cache
->m_hash_table
!= cache
->m_hash_table_0
)
1022 cache
->m_hash_table
,
1023 cache
->m_hash_size
* sizeof(rtl_cache_bufctl_type
*));
1025 cache
->m_hash_table
= cache
->m_hash_table_0
;
1026 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
1027 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
1033 /* ================================================================= *
1035 * cache implementation.
1037 * ================================================================= */
1039 /** rtl_cache_create()
1042 SAL_CALL
rtl_cache_create (
1046 int (SAL_CALL
* constructor
)(void * obj
, void * userarg
),
1047 void (SAL_CALL
* destructor
) (void * obj
, void * userarg
),
1048 void (SAL_CALL
* reclaim
) (void * userarg
),
1050 rtl_arena_type
* source
,
1052 ) SAL_THROW_EXTERN_C()
1054 rtl_cache_type
* result
= 0;
1055 sal_Size size
= sizeof(rtl_cache_type
);
1058 result
= (rtl_cache_type
*)rtl_arena_alloc (gp_cache_arena
, &size
);
1061 rtl_cache_type
* cache
= result
;
1062 (void) rtl_cache_constructor (cache
);
1066 /* use default arena */
1067 assert(gp_default_arena
!= 0);
1068 source
= gp_default_arena
;
1071 result
= rtl_cache_activate (
1086 /* activation failed */
1087 rtl_cache_deactivate (cache
);
1088 rtl_cache_destructor (cache
);
1089 rtl_arena_free (gp_cache_arena
, cache
, size
);
1092 else if (gp_cache_arena
== 0)
1094 ensureCacheSingleton();
1104 /** rtl_cache_destroy()
1106 void SAL_CALL
rtl_cache_destroy (
1107 rtl_cache_type
* cache
1108 ) SAL_THROW_EXTERN_C()
1112 rtl_cache_deactivate (cache
);
1113 rtl_cache_destructor (cache
);
1114 rtl_arena_free (gp_cache_arena
, cache
, sizeof(rtl_cache_type
));
1118 /** rtl_cache_alloc()
1121 SAL_CALL
rtl_cache_alloc (
1122 rtl_cache_type
* cache
1123 ) SAL_THROW_EXTERN_C()
1130 if (alloc_mode
== AMode_SYSTEM
)
1132 obj
= rtl_allocateMemory(cache
->m_type_size
);
1133 if ((obj
!= 0) && (cache
->m_constructor
!= 0))
1135 if (!((cache
->m_constructor
)(obj
, cache
->m_userarg
)))
1137 /* construction failure */
1138 rtl_freeMemory(obj
), obj
= 0;
1144 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1145 if (cache
->m_cpu_curr
!= 0)
1149 /* take object from magazine layer */
1150 rtl_cache_magazine_type
*curr
, *prev
, *temp
;
1152 curr
= cache
->m_cpu_curr
;
1153 if ((curr
!= 0) && (curr
->m_mag_used
> 0))
1155 obj
= curr
->m_objects
[--curr
->m_mag_used
];
1156 cache
->m_cpu_stats
.m_alloc
+= 1;
1157 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1162 prev
= cache
->m_cpu_prev
;
1163 if ((prev
!= 0) && (prev
->m_mag_used
> 0))
1165 temp
= cache
->m_cpu_curr
;
1166 cache
->m_cpu_curr
= cache
->m_cpu_prev
;
1167 cache
->m_cpu_prev
= temp
;
1172 temp
= rtl_cache_depot_exchange_alloc (cache
, prev
);
1175 cache
->m_cpu_prev
= cache
->m_cpu_curr
;
1176 cache
->m_cpu_curr
= temp
;
1181 /* no full magazine: fall through to slab layer */
1185 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1187 /* alloc buffer from slab layer */
1188 obj
= rtl_cache_slab_alloc (cache
);
1189 if ((obj
!= 0) && (cache
->m_constructor
!= 0))
1191 /* construct object */
1192 if (!((cache
->m_constructor
)(obj
, cache
->m_userarg
)))
1194 /* construction failure */
1195 rtl_cache_slab_free (cache
, obj
), obj
= 0;
1201 /** rtl_cache_free()
1204 SAL_CALL
rtl_cache_free (
1205 rtl_cache_type
* cache
,
1207 ) SAL_THROW_EXTERN_C()
1209 if ((obj
!= 0) && (cache
!= 0))
1211 if (alloc_mode
== AMode_SYSTEM
)
1213 if (cache
->m_destructor
!= 0)
1215 /* destruct object */
1216 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
1218 rtl_freeMemory(obj
);
1222 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1226 /* return object to magazine layer */
1227 rtl_cache_magazine_type
*curr
, *prev
, *temp
;
1229 curr
= cache
->m_cpu_curr
;
1230 if ((curr
!= 0) && (curr
->m_mag_used
< curr
->m_mag_size
))
1232 curr
->m_objects
[curr
->m_mag_used
++] = obj
;
1233 cache
->m_cpu_stats
.m_free
+= 1;
1234 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1239 prev
= cache
->m_cpu_prev
;
1240 if ((prev
!= 0) && (prev
->m_mag_used
== 0))
1242 temp
= cache
->m_cpu_curr
;
1243 cache
->m_cpu_curr
= cache
->m_cpu_prev
;
1244 cache
->m_cpu_prev
= temp
;
1249 temp
= rtl_cache_depot_exchange_free (cache
, prev
);
1252 cache
->m_cpu_prev
= cache
->m_cpu_curr
;
1253 cache
->m_cpu_curr
= temp
;
1258 if (rtl_cache_depot_populate(cache
))
1263 /* no empty magazine: fall through to slab layer */
1267 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1269 /* no space for constructed object in magazine layer */
1270 if (cache
->m_destructor
!= 0)
1272 /* destruct object */
1273 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
1276 /* return buffer to slab layer */
1277 rtl_cache_slab_free (cache
, obj
);
1281 /* ================================================================= *
1283 * cache wsupdate (machdep) internals.
1285 * ================================================================= */
1287 /** rtl_cache_wsupdate_init()
1289 * @precond g_cache_list.m_lock initialized
1292 rtl_cache_wsupdate_init();
1294 /** rtl_cache_wsupdate_wait()
1296 * @precond g_cache_list.m_lock acquired
1299 rtl_cache_wsupdate_wait (
1300 unsigned int seconds
1303 /** rtl_cache_wsupdate_fini()
1307 rtl_cache_wsupdate_fini();
1309 /* ================================================================= */
1311 #if defined(SAL_UNX)
1313 #include <sys/time.h>
1316 rtl_cache_wsupdate_all (void * arg
);
1319 rtl_cache_wsupdate_init()
1321 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1322 g_cache_list
.m_update_done
= 0;
1323 (void) pthread_cond_init (&(g_cache_list
.m_update_cond
), NULL
);
1324 if (pthread_create (
1325 &(g_cache_list
.m_update_thread
), NULL
, rtl_cache_wsupdate_all
, (void*)(10)) != 0)
1328 g_cache_list
.m_update_thread
= (pthread_t
)(0);
1330 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1334 rtl_cache_wsupdate_wait (unsigned int seconds
)
1341 gettimeofday(&now
, 0);
1342 wakeup
.tv_sec
= now
.tv_sec
+ (seconds
);
1343 wakeup
.tv_nsec
= now
.tv_usec
* 1000;
1345 (void) pthread_cond_timedwait (
1346 &(g_cache_list
.m_update_cond
),
1347 &(g_cache_list
.m_lock
),
1353 rtl_cache_wsupdate_fini()
1355 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1356 g_cache_list
.m_update_done
= 1;
1357 pthread_cond_signal (&(g_cache_list
.m_update_cond
));
1358 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1360 if (g_cache_list
.m_update_thread
!= (pthread_t
)(0))
1361 pthread_join (g_cache_list
.m_update_thread
, NULL
);
1364 /* ================================================================= */
1366 #elif defined(SAL_W32)
1369 rtl_cache_wsupdate_all (void * arg
);
1372 rtl_cache_wsupdate_init()
1376 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1377 g_cache_list
.m_update_done
= 0;
1378 g_cache_list
.m_update_cond
= CreateEvent (0, TRUE
, FALSE
, 0);
1380 g_cache_list
.m_update_thread
=
1381 CreateThread (NULL
, 0, rtl_cache_wsupdate_all
, (LPVOID
)(10), 0, &dwThreadId
);
1382 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1386 rtl_cache_wsupdate_wait (unsigned int seconds
)
1390 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1391 WaitForSingleObject (g_cache_list
.m_update_cond
, (DWORD
)(seconds
* 1000));
1392 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1397 rtl_cache_wsupdate_fini()
1399 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1400 g_cache_list
.m_update_done
= 1;
1401 SetEvent (g_cache_list
.m_update_cond
);
1402 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1404 WaitForSingleObject (g_cache_list
.m_update_thread
, INFINITE
);
1407 #endif /* SAL_UNX || SAL_W32 */
1409 /* ================================================================= */
1411 /** rtl_cache_depot_wsupdate()
1412 * update depot stats and purge excess magazines.
1414 * @precond cache->m_depot_lock acquired
1417 rtl_cache_depot_wsupdate (
1418 rtl_cache_type
* cache
,
1419 rtl_cache_depot_type
* depot
1424 depot
->m_prev_min
= depot
->m_curr_min
;
1425 depot
->m_curr_min
= depot
->m_mag_count
;
1427 npurge
= depot
->m_curr_min
< depot
->m_prev_min
? depot
->m_curr_min
: depot
->m_prev_min
;
1428 for (; npurge
> 0; npurge
--)
1430 rtl_cache_magazine_type
* mag
= rtl_cache_depot_dequeue (depot
);
1433 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1434 rtl_cache_magazine_clear (cache
, mag
);
1435 rtl_cache_free (cache
->m_magazine_cache
, mag
);
1436 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1441 /** rtl_cache_wsupdate()
1443 * @precond cache->m_depot_lock released
1446 rtl_cache_wsupdate (
1447 rtl_cache_type
* cache
1450 if (cache
->m_magazine_cache
!= 0)
1452 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1456 // "rtl_cache_wsupdate(" << cache->m_name
1457 // << ") [depot: count, curr_min, prev_min] full: "
1458 // << cache->m_depot_full.m_mag_count << ", "
1459 // << cache->m_depot_full.m_curr_min << ", "
1460 // << cache->m_depot_full.m_prev_min << "; empty: "
1461 // << cache->m_depot_empty.m_mag_count << ", "
1462 // << cache->m_depot_empty.m_curr_min << ", "
1463 // << cache->m_depot_empty.m_prev_min);
1465 rtl_cache_depot_wsupdate (cache
, &(cache
->m_depot_full
));
1466 rtl_cache_depot_wsupdate (cache
, &(cache
->m_depot_empty
));
1468 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1472 /** rtl_cache_wsupdate_all()
1475 #if defined(SAL_UNX)
1477 #elif defined(SAL_W32)
1479 #endif /* SAL_UNX || SAL_W32 */
1480 rtl_cache_wsupdate_all (void * arg
)
1482 osl::Thread::setName("rtl_cache_wsupdate_all");
1483 unsigned int seconds
= sal::static_int_cast
< unsigned int >(
1484 reinterpret_cast< sal_uIntPtr
>(arg
));
1486 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1487 while (!g_cache_list
.m_update_done
)
1489 rtl_cache_wsupdate_wait (seconds
);
1490 if (!g_cache_list
.m_update_done
)
1492 rtl_cache_type
* head
, * cache
;
1494 head
= &(g_cache_list
.m_cache_head
);
1495 for (cache
= head
->m_cache_next
;
1497 cache
= cache
->m_cache_next
)
1499 rtl_cache_wsupdate (cache
);
1503 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1508 /* ================================================================= *
1510 * cache initialization.
1512 * ================================================================= */
1518 /* list of caches */
1519 RTL_MEMORY_LOCK_INIT(&(g_cache_list
.m_lock
));
1520 (void) rtl_cache_constructor (&(g_cache_list
.m_cache_head
));
1523 /* cache: internal arena */
1524 assert(gp_cache_arena
== 0);
1526 gp_cache_arena
= rtl_arena_create (
1527 "rtl_cache_internal_arena",
1529 0, /* no quantum caching */
1530 NULL
, /* default source */
1535 assert(gp_cache_arena
!= 0);
1537 /* check 'gp_default_arena' initialization */
1538 assert(gp_default_arena
!= 0);
1541 /* cache: magazine cache */
1542 static rtl_cache_type g_cache_magazine_cache
;
1544 assert(gp_cache_magazine_cache
== 0);
1545 (void) rtl_cache_constructor (&g_cache_magazine_cache
);
1547 gp_cache_magazine_cache
= rtl_cache_activate (
1548 &g_cache_magazine_cache
,
1549 "rtl_cache_magazine_cache",
1550 sizeof(rtl_cache_magazine_type
), /* objsize */
1552 rtl_cache_magazine_constructor
,
1553 rtl_cache_magazine_destructor
,
1555 0, /* userarg: NYI */
1556 gp_default_arena
, /* source */
1557 RTL_CACHE_FLAG_NOMAGAZINE
/* during bootstrap; activated below */
1559 assert(gp_cache_magazine_cache
!= 0);
1561 /* activate magazine layer */
1562 g_cache_magazine_cache
.m_magazine_cache
= gp_cache_magazine_cache
;
1565 /* cache: slab (struct) cache */
1566 static rtl_cache_type g_cache_slab_cache
;
1568 assert(gp_cache_slab_cache
== 0);
1569 (void) rtl_cache_constructor (&g_cache_slab_cache
);
1571 gp_cache_slab_cache
= rtl_cache_activate (
1572 &g_cache_slab_cache
,
1573 "rtl_cache_slab_cache",
1574 sizeof(rtl_cache_slab_type
), /* objsize */
1576 rtl_cache_slab_constructor
,
1577 rtl_cache_slab_destructor
,
1579 0, /* userarg: none */
1580 gp_default_arena
, /* source */
1583 assert(gp_cache_slab_cache
!= 0);
1586 /* cache: bufctl cache */
1587 static rtl_cache_type g_cache_bufctl_cache
;
1589 assert(gp_cache_bufctl_cache
== 0);
1590 (void) rtl_cache_constructor (&g_cache_bufctl_cache
);
1592 gp_cache_bufctl_cache
= rtl_cache_activate (
1593 &g_cache_bufctl_cache
,
1594 "rtl_cache_bufctl_cache",
1595 sizeof(rtl_cache_bufctl_type
), /* objsize */
1597 0, /* constructor */
1601 gp_default_arena
, /* source */
1604 assert(gp_cache_bufctl_cache
!= 0);
1607 rtl_cache_wsupdate_init();
1608 // SAL_INFO("sal.rtl", "rtl_cache_init completed");
1611 /* ================================================================= */
1616 if (gp_cache_arena
!= 0)
1618 rtl_cache_type
* cache
, * head
;
1620 rtl_cache_wsupdate_fini();
1622 if (gp_cache_bufctl_cache
!= 0)
1624 cache
= gp_cache_bufctl_cache
, gp_cache_bufctl_cache
= 0;
1625 rtl_cache_deactivate (cache
);
1626 rtl_cache_destructor (cache
);
1628 if (gp_cache_slab_cache
!= 0)
1630 cache
= gp_cache_slab_cache
, gp_cache_slab_cache
= 0;
1631 rtl_cache_deactivate (cache
);
1632 rtl_cache_destructor (cache
);
1634 if (gp_cache_magazine_cache
!= 0)
1636 cache
= gp_cache_magazine_cache
, gp_cache_magazine_cache
= 0;
1637 rtl_cache_deactivate (cache
);
1638 rtl_cache_destructor (cache
);
1640 if (gp_cache_arena
!= 0)
1642 rtl_arena_destroy (gp_cache_arena
);
1646 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1647 head
= &(g_cache_list
.m_cache_head
);
1648 for (cache
= head
->m_cache_next
; cache
!= head
; cache
= cache
->m_cache_next
)
1652 // "rtl_cache_fini(" << cache->m_name << ") [slab]: allocs: "
1653 // << cache->m_slab_stats.m_alloc << ", frees: "
1654 // << cache->m_slab_stats.m_free << "; total: "
1655 // << cache->m_slab_stats.m_mem_total << ", used: "
1656 // << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
1657 // << cache->m_cpu_stats.m_alloc << ", frees: "
1658 // << cache->m_cpu_stats.m_free << "; [total]: allocs: "
1659 // << (cache->m_slab_stats.m_alloc
1660 // + cache->m_cpu_stats.m_alloc)
1662 // << (cache->m_slab_stats.m_free
1663 // + cache->m_cpu_stats.m_free));
1665 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1667 // SAL_INFO("sal.rtl", "rtl_cache_fini completed");
1670 /* vim:set shiftwidth=4 softtabstop=4 expandtab: */