/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */

#include "alloc_cache.hxx"
#include "alloc_impl.hxx"
#include "alloc_arena.hxx"
#include "internal/rtllifecycle.h"
#include "sal/macros.h"
#include "osl/diagnose.h"
#include <osl/thread.hxx>

#include <cassert>
#include <string.h>
#include <stdio.h>

extern AllocMode alloc_mode;

/* ================================================================= *
 *
 * cache internals.
 *
 * ================================================================= */

/** g_cache_list
 *  @internal
 */
struct rtl_cache_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_cache_type       m_cache_head;

#if defined(SAL_UNX)
    pthread_t            m_update_thread;
    pthread_cond_t       m_update_cond;
#elif defined(SAL_W32)
    HANDLE               m_update_thread;
    HANDLE               m_update_cond;
#endif /* SAL_UNX || SAL_W32 */
    int                  m_update_done;
};

static rtl_cache_list_st g_cache_list;

/** gp_cache_arena
 *  provided for cache_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_cache_arena = 0;

/** gp_cache_magazine_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_magazine_cache = 0;

/** gp_cache_slab_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_slab_cache = 0;

/** gp_cache_bufctl_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_bufctl_cache = 0;

/* ================================================================= */

/** RTL_CACHE_HASH_INDEX()
 */
#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
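
/* Added commentary: the index folds a buffer address with copies of itself
 * shifted right by m_hash_shift and 2 * m_hash_shift, drops the low
 * m_type_shift bits (which coincide for all buffers of one cache), and
 * masks with the power-of-two table size. E.g. with m_hash_size == 8 the
 * mask is 0x7, so three bits of the folded address select the chain.
 */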

/** rtl_cache_hash_rescale()
 */
void
rtl_cache_hash_rescale (
    rtl_cache_type * cache,
    sal_Size         new_size
)
{
    rtl_cache_bufctl_type ** new_table;
    sal_Size                 new_bytes;

    new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
    new_table = static_cast<rtl_cache_bufctl_type**>(rtl_arena_alloc(gp_cache_arena, &new_bytes));

    if (new_table != 0)
    {
        rtl_cache_bufctl_type ** old_table;
        sal_Size                 old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

        old_table = cache->m_hash_table;
        old_size  = cache->m_hash_size;

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_cache_hash_rescale(" << cache->m_name << "): nbuf: "
        //      << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //      << " (ave: "
        //      << ((cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //          >> cache->m_hash_shift)
        //      << "), frees: " << cache->m_slab_stats.m_free << " [old_size: "
        //      << old_size << ", new_size: " << new_size << ']');

        cache->m_hash_table = new_table;
        cache->m_hash_size  = new_size;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_cache_bufctl_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_cache_bufctl_type  * next = curr->m_next;
                rtl_cache_bufctl_type ** head;

                // coverity[negative_shift]
                head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
                curr->m_next = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));

        if (old_table != cache->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
            rtl_arena_free (gp_cache_arena, old_table, old_bytes);
        }
    }
}

/** rtl_cache_hash_insert()
 */
sal_uIntPtr
rtl_cache_hash_insert (
    rtl_cache_type *        cache,
    rtl_cache_bufctl_type * bufctl
)
{
    rtl_cache_bufctl_type ** ppHead;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);

    bufctl->m_next = (*ppHead);
    (*ppHead) = bufctl;

    return (bufctl->m_addr);
}

/** rtl_cache_hash_remove()
 */
rtl_cache_bufctl_type *
rtl_cache_hash_remove (
    rtl_cache_type * cache,
    sal_uIntPtr      addr
)
{
    rtl_cache_bufctl_type ** ppHead;
    rtl_cache_bufctl_type  * bufctl;
    sal_Size                 lookups = 0;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
    while ((bufctl = *ppHead) != 0)
    {
        if (bufctl->m_addr == addr)
        {
            *ppHead = bufctl->m_next, bufctl->m_next = 0;
            break;
        }

        lookups += 1;
        ppHead = &(bufctl->m_next);
    }

    assert(bufctl != 0); // bad free

    if (lookups > 1)
    {
        sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
        if (nbuf > 4 * cache->m_hash_size)
        {
            if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
            {
                sal_Size ave = nbuf >> cache->m_hash_shift;
                // coverity[negative_shift]
                sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);

                cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                rtl_cache_hash_rescale (cache, new_size);
                RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
                cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
            }
        }
    }

    return (bufctl);
}
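
/* Added commentary: rescaling is deliberately lazy. It is triggered only
 * from rtl_cache_hash_remove(), only after a lookup needed more than one
 * probe, and only once live buffers outnumber hash chains four to one.
 * The RTL_CACHE_FEATURE_RESCALE flag keeps a second thread from resizing
 * the same table while m_slab_lock is temporarily dropped.
 */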

/* ================================================================= */

/** RTL_CACHE_SLAB()
 */
#define RTL_CACHE_SLAB(addr, size) \
    ((reinterpret_cast<rtl_cache_slab_type*>(RTL_MEMORY_P2END(reinterpret_cast<sal_uIntPtr>(addr), (size)))) - 1)
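
/* Added commentary: for caches without RTL_CACHE_FEATURE_HASH the slab
 * header lives inside the slab memory itself. RTL_MEMORY_P2END rounds
 * 'addr' up to the end of the power-of-two sized slab, and '- 1' steps
 * back one rtl_cache_slab_type, so any buffer address within a slab
 * recovers its header arithmetically, with no lookup table.
 */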

/** rtl_cache_slab_constructor()
 */
int
rtl_cache_slab_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_slab_type * slab = static_cast<rtl_cache_slab_type*>(obj);

    QUEUE_START_NAMED(slab, slab_);
    slab->m_ntypes = 0;

    return (1);
}

/** rtl_cache_slab_destructor()
 */
void
rtl_cache_slab_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_slab_type * slab = static_cast< rtl_cache_slab_type * >(obj);
    assert(QUEUE_STARTED_NAMED(slab, slab_)); // assure removed from queue(s)
    assert(slab->m_ntypes == 0); // assure no longer referenced
    (void) slab; // avoid warnings
}

/** rtl_cache_slab_create()
 *
 *  @precond cache->m_slab_lock released.
 */
rtl_cache_slab_type *
rtl_cache_slab_create (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab = 0;
    void *                addr;
    sal_Size              size;

    size = cache->m_slab_size;
    addr = rtl_arena_alloc (cache->m_source, &size);
    if (addr != 0)
    {
        assert(size >= cache->m_slab_size);

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* allocate slab struct from slab cache */
            assert(cache != gp_cache_slab_cache);
            slab = static_cast<rtl_cache_slab_type*>(rtl_cache_alloc (gp_cache_slab_cache));
        }
        else
        {
            /* construct embedded slab struct */
            slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
            (void) rtl_cache_slab_constructor (slab, 0);
        }
        if (slab != 0)
        {
            slab->m_data = reinterpret_cast<sal_uIntPtr>(addr);

            /* dynamic freelist initialization */
            slab->m_bp = slab->m_data;
            slab->m_sp = 0;
        }
        else
        {
            rtl_arena_free (cache->m_source, addr, size);
        }
    }
    return (slab);
}

/** rtl_cache_slab_destroy()
 *
 *  @precond cache->m_slab_lock released.
 */
void
rtl_cache_slab_destroy (
    rtl_cache_type *      cache,
    rtl_cache_slab_type * slab
)
{
    void *   addr   = reinterpret_cast<void*>(slab->m_data);
    sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;

    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        /* cleanup bufctl(s) for free buffer(s) */
        sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
        for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
        {
            rtl_cache_bufctl_type * bufctl = slab->m_sp;

            /* pop from freelist */
            slab->m_sp = bufctl->m_next, bufctl->m_next = 0;

            /* return bufctl struct to bufctl cache */
            rtl_cache_free (gp_cache_bufctl_cache, bufctl);
        }
        assert(ntypes == 0);

        /* return slab struct to slab cache */
        rtl_cache_free (gp_cache_slab_cache, slab);
    }
    else
    {
        /* destruct embedded slab struct */
        rtl_cache_slab_destructor (slab, 0);
    }

    if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
    {
        /* free memory */
        rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
    }
}

/** rtl_cache_slab_populate()
 *
 *  @precond cache->m_slab_lock acquired.
 */
bool
rtl_cache_slab_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab;

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    slab = rtl_cache_slab_create (cache);
    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
    if (slab != 0)
    {
        /* update buffer start addr w/ current color */
        slab->m_bp += cache->m_ncolor;

        /* update color for next slab */
        cache->m_ncolor += cache->m_type_align;
        if (cache->m_ncolor > cache->m_ncolor_max)
            cache->m_ncolor = 0;

        /* update stats */
        cache->m_slab_stats.m_mem_total += cache->m_slab_size;

        /* insert onto 'free' queue */
        QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
    }
    return (slab != 0);
}
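
/* Added commentary: m_ncolor implements slab coloring. Each new slab
 * starts its first buffer at a different multiple of the alignment
 * (wrapping at m_ncolor_max, the slack left after carving the slab into
 * buffers), so the same buffer index in different slabs does not always
 * map to the same hardware cache lines.
 */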

/* ================================================================= */

/** rtl_cache_slab_alloc()
 *
 *  Allocate a buffer from slab layer; used by magazine layer.
 */
void *
rtl_cache_slab_alloc (
    rtl_cache_type * cache
)
{
    void                * addr = 0;
    rtl_cache_slab_type * head;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    head = &(cache->m_free_head);
    if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
    {
        rtl_cache_slab_type   * slab;
        rtl_cache_bufctl_type * bufctl;

        slab = head->m_slab_next;
        assert(slab->m_ntypes < cache->m_ntypes);

        if (slab->m_sp == 0)
        {
            /* initialize bufctl w/ current 'slab->m_bp' */
            assert(slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
            if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            {
                /* allocate bufctl */
                assert(cache != gp_cache_bufctl_cache);
                bufctl = static_cast<rtl_cache_bufctl_type*>(rtl_cache_alloc (gp_cache_bufctl_cache));
                if (bufctl == 0)
                {
                    /* out of memory */
                    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                    return (0);
                }

                bufctl->m_addr = slab->m_bp;
                bufctl->m_slab = reinterpret_cast<sal_uIntPtr>(slab);
            }
            else
            {
                /* embedded bufctl */
                bufctl = reinterpret_cast<rtl_cache_bufctl_type*>(slab->m_bp);
            }
            bufctl->m_next = 0;

            /* update 'slab->m_bp' to next free buffer */
            slab->m_bp += cache->m_type_size;

            /* assign bufctl to freelist */
            slab->m_sp = bufctl;
        }

        /* pop front */
        bufctl = slab->m_sp;
        slab->m_sp = bufctl->m_next;

        /* increment usage, check for full slab */
        if ((slab->m_ntypes += 1) == cache->m_ntypes)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'used' queue (tail) */
            QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
        }

        /* update stats */
        cache->m_slab_stats.m_alloc     += 1;
        cache->m_slab_stats.m_mem_alloc += cache->m_type_size;

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            addr = reinterpret_cast<void*>(rtl_cache_hash_insert (cache, bufctl));
        else
            addr = bufctl;
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    return (addr);
}

/** rtl_cache_slab_free()
 *
 *  Return a buffer to slab layer; used by magazine layer.
 */
void
rtl_cache_slab_free (
    rtl_cache_type * cache,
    void *           addr
)
{
    rtl_cache_bufctl_type * bufctl;
    rtl_cache_slab_type   * slab;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    /* determine slab from addr */
    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        bufctl = rtl_cache_hash_remove (cache, reinterpret_cast<sal_uIntPtr>(addr));
        slab = (bufctl != 0) ? reinterpret_cast<rtl_cache_slab_type*>(bufctl->m_slab) : 0;
    }
    else
    {
        /* embedded slab struct */
        bufctl = static_cast<rtl_cache_bufctl_type*>(addr);
        slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
    }

    if (slab != 0)
    {
        /* check for full slab */
        if (slab->m_ntypes == cache->m_ntypes)
        {
            /* remove from 'used' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'free' queue (head) */
            QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
        }

        /* push front */
        bufctl->m_next = slab->m_sp;
        slab->m_sp = bufctl;

        /* update stats */
        cache->m_slab_stats.m_free      += 1;
        cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;

        /* decrement usage, check for empty slab */
        if ((slab->m_ntypes -= 1) == 0)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* update stats */
            cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

            /* free 'empty' slab */
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
            rtl_cache_slab_destroy (cache, slab);

            return;
        }
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
}

/* ================================================================= */

/** rtl_cache_magazine_constructor()
 */
int
rtl_cache_magazine_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_magazine_type * mag = static_cast<rtl_cache_magazine_type*>(obj);
    /* @@@ sal_Size size = (sal_Size)(arg); @@@ */

    mag->m_mag_next = 0;
    mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
    mag->m_mag_used = 0;

    return (1);
}

/** rtl_cache_magazine_destructor()
 */
void
rtl_cache_magazine_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_magazine_type * mag = static_cast< rtl_cache_magazine_type * >(obj);
    assert(mag->m_mag_next == 0); // assure removed from queue(s)
    assert(mag->m_mag_used == 0); // assure no longer referenced
    (void) mag; // avoid warnings
}

/** rtl_cache_magazine_clear()
 */
void
rtl_cache_magazine_clear (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * mag
)
{
    for (; mag->m_mag_used > 0; --mag->m_mag_used)
    {
        void * obj = mag->m_objects[mag->m_mag_used - 1];
        mag->m_objects[mag->m_mag_used - 1] = 0;

        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}
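
/* Added commentary: a magazine is a fixed-size array of up to m_mag_size
 * pointers to constructed objects, used as a stack. Clearing destructs
 * each object and returns its buffer to the slab layer; the emptied
 * magazine struct itself is freed by the caller.
 */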

/* ================================================================= */

/** rtl_cache_depot_enqueue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline void
rtl_cache_depot_enqueue (
    rtl_cache_depot_type *    depot,
    rtl_cache_magazine_type * mag
)
{
    /* enqueue empty magazine */
    mag->m_mag_next = depot->m_mag_next;
    depot->m_mag_next = mag;

    /* update depot stats */
    depot->m_mag_count++;
}

/** rtl_cache_depot_dequeue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_dequeue (
    rtl_cache_depot_type * depot
)
{
    rtl_cache_magazine_type * mag = 0;
    if (depot->m_mag_count > 0)
    {
        /* dequeue magazine */
        assert(depot->m_mag_next != 0);

        mag = depot->m_mag_next;
        depot->m_mag_next = mag->m_mag_next;
        mag->m_mag_next = 0;

        /* update depot stats */
        depot->m_mag_count--;
        if(depot->m_curr_min > depot->m_mag_count)
        {
            depot->m_curr_min = depot->m_mag_count;
        }
    }
    return (mag);
}

/** rtl_cache_depot_exchange_alloc()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_exchange_alloc (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * empty
)
{
    rtl_cache_magazine_type * full;

    assert((empty == 0) || (empty->m_mag_used == 0));

    /* dequeue full magazine */
    full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
    if ((full != 0) && (empty != 0))
    {
        /* enqueue empty magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
    }

    assert((full == 0) || (full->m_mag_used > 0));

    return (full);
}

/** rtl_cache_depot_exchange_free()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_exchange_free (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * full
)
{
    rtl_cache_magazine_type * empty;

    assert((full == 0) || (full->m_mag_used > 0));

    /* dequeue empty magazine */
    empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
    if ((empty != 0) && (full != 0))
    {
        /* enqueue full magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
    }

    assert((empty == 0) || (empty->m_mag_used == 0));

    return (empty);
}
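
/* Added commentary: the two exchange operations form the depot protocol:
 * allocation trades the CPU layer's exhausted magazine for a full one,
 * freeing trades a full magazine for an empty one. Trading whole
 * magazines means the slab layer is visited at most once per m_mag_size
 * object operations on the fast path.
 */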

/** rtl_cache_depot_populate()
 *
 *  @precond cache->m_depot_lock acquired.
 */
bool
rtl_cache_depot_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_magazine_type * empty = 0;

    if (cache->m_magazine_cache != 0)
    {
        /* allocate new empty magazine */
        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
        empty = static_cast<rtl_cache_magazine_type*>(rtl_cache_alloc (cache->m_magazine_cache));
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        if (empty != 0)
        {
            /* enqueue (new) empty magazine */
            rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
        }
    }
    return (empty != 0);
}

/* ================================================================= */

/** rtl_cache_constructor()
 */
int
rtl_cache_constructor (void * obj)
{
    rtl_cache_type * cache = static_cast<rtl_cache_type*>(obj);

    memset (cache, 0, sizeof(rtl_cache_type));

    /* linkage */
    QUEUE_START_NAMED(cache, cache_);

    /* slab layer */
    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));

    QUEUE_START_NAMED(&(cache->m_free_head), slab_);
    QUEUE_START_NAMED(&(cache->m_used_head), slab_);

    cache->m_hash_table = cache->m_hash_table_0;
    cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
    cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

    /* depot layer */
    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));

    return (1);
}

/** rtl_cache_destructor()
 */
void
rtl_cache_destructor (void * obj)
{
    rtl_cache_type * cache = static_cast<rtl_cache_type*>(obj);

    /* linkage */
    assert(QUEUE_STARTED_NAMED(cache, cache_));

    /* slab layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));

    assert(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
    assert(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));

    assert(cache->m_hash_table == cache->m_hash_table_0);
    assert(cache->m_hash_size  == RTL_CACHE_HASH_SIZE);
    assert(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));

    /* depot layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
}

/* ================================================================= */

/** rtl_cache_activate()
 */
rtl_cache_type *
rtl_cache_activate (
    rtl_cache_type * cache,
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
)
{
    assert(cache != 0);
    if (cache != 0)
    {
        sal_Size slabsize;

        snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);

        /* ensure minimum size (embedded bufctl linkage) */
        if(objsize < sizeof(rtl_cache_bufctl_type*))
        {
            objsize = sizeof(rtl_cache_bufctl_type*);
        }

        if (objalign == 0)
        {
            /* determine default alignment */
            if (objsize >= RTL_MEMORY_ALIGNMENT_8)
                objalign = RTL_MEMORY_ALIGNMENT_8;
            else
                objalign = RTL_MEMORY_ALIGNMENT_4;
        }
        else
        {
            /* ensure minimum alignment */
            if(objalign < RTL_MEMORY_ALIGNMENT_4)
            {
                objalign = RTL_MEMORY_ALIGNMENT_4;
            }
        }
        assert(RTL_MEMORY_ISP2(objalign));

        cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
        cache->m_type_align = objalign;
        cache->m_type_shift = highbit(cache->m_type_size) - 1;

        cache->m_constructor = constructor;
        cache->m_destructor  = destructor;
        cache->m_reclaim     = reclaim;
        cache->m_userarg     = userarg;

        /* slab layer */
        cache->m_source = source;

        slabsize = source->m_quantum; /* minimum slab size */
        if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
        {
            /* next power of 2 above 3 * qcache_max */
            if(slabsize < (((sal_Size)1) << highbit(3 * source->m_qcache_max)))
            {
                slabsize = (((sal_Size)1) << highbit(3 * source->m_qcache_max));
            }
        }
        else
        {
            /* waste at most 1/8 of slab */
            if(slabsize < cache->m_type_size * 8)
            {
                slabsize = cache->m_type_size * 8;
            }
        }

        slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
        if (!RTL_MEMORY_ISP2(slabsize))
            slabsize = (((sal_Size)1) << highbit(slabsize));
        cache->m_slab_size = slabsize;

        if (cache->m_slab_size > source->m_quantum)
        {
            assert(gp_cache_slab_cache != 0);
            assert(gp_cache_bufctl_cache != 0);

            cache->m_features  |= RTL_CACHE_FEATURE_HASH;
            cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
            cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
        }
        else
        {
            /* embedded slab struct */
            cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
            cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
        }

        assert(cache->m_ntypes > 0);

        if (flags & RTL_CACHE_FLAG_BULKDESTROY)
        {
            /* allow bulk slab delete upon cache deactivation */
            cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
        }

        /* magazine layer */
        if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
        {
            assert(gp_cache_magazine_cache != 0);
            cache->m_magazine_cache = gp_cache_magazine_cache;
        }

        /* insert into cache list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    return (cache);
}
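
/* Added illustration (not from the original source): for 64-byte objects
 * on a source arena with a 4096-byte quantum, slabsize starts at 4096
 * (already >= 8 * 64 and a power of two). Since m_slab_size equals the
 * quantum, the slab struct is embedded: m_ntypes becomes
 * (4096 - sizeof(rtl_cache_slab_type)) / 64 buffers per slab, and the
 * remainder of that division becomes m_ncolor_max.
 */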

/** rtl_cache_deactivate()
 */
void
rtl_cache_deactivate (
    rtl_cache_type * cache
)
{
    /* remove from cache list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    bool active = !QUEUE_STARTED_NAMED(cache, cache_);
    QUEUE_REMOVE_NAMED(cache, cache_);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    assert(active); // orphaned cache
    (void) active;

    /* cleanup magazine layer */
    if (cache->m_magazine_cache != 0)
    {
        rtl_cache_type *          mag_cache;
        rtl_cache_magazine_type * mag;

        /* prevent recursion */
        mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;

        /* cleanup cpu layer */
        if ((mag = cache->m_cpu_curr) != 0)
        {
            // coverity[missing_lock]
            cache->m_cpu_curr = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        if ((mag = cache->m_cpu_prev) != 0)
        {
            // coverity[missing_lock]
            cache->m_cpu_prev = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }

        /* cleanup depot layer */
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
    }

    // SAL_INFO(
    //  "sal.rtl",
    //  "rtl_cache_deactivate(" << cache->m_name << "): [slab]: allocs: "
    //      << cache->m_slab_stats.m_alloc << ", frees: "
    //      << cache->m_slab_stats.m_free << "; total: "
    //      << cache->m_slab_stats.m_mem_total << ", used: "
    //      << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
    //      << cache->m_cpu_stats.m_alloc << ", frees: "
    //      << cache->m_cpu_stats.m_free << "; [total]: allocs: "
    //      << (cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc)
    //      << ", frees: "
    //      << (cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free));

    /* cleanup slab layer */
    if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
    {
        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_cache_deactivate(" << cache->m_name << "): cleaning up "
        //      << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //      << " leaked buffer(s) [" << cache->m_slab_stats.m_mem_alloc
        //      << " bytes] [" << cache->m_slab_stats.m_mem_total << " total]");

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* cleanup bufctl(s) for leaking buffer(s) */
            sal_Size i, n = cache->m_hash_size;
            for (i = 0; i < n; i++)
            {
                rtl_cache_bufctl_type * bufctl;
                while ((bufctl = cache->m_hash_table[i]) != 0)
                {
                    /* pop from hash table */
                    cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;

                    /* return to bufctl cache */
                    rtl_cache_free (gp_cache_bufctl_cache, bufctl);
                }
            }
        }
        {
            /* force cleanup of remaining slabs */
            rtl_cache_slab_type *head, *slab;

            head = &(cache->m_used_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'used' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }

            head = &(cache->m_free_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'free' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }
        }
    }

    if (cache->m_hash_table != cache->m_hash_table_0)
    {
        rtl_arena_free (
            gp_cache_arena,
            cache->m_hash_table,
            cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));

        cache->m_hash_table = cache->m_hash_table_0;
        cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
    }
}

/* ================================================================= *
 *
 * cache implementation.
 *
 * ================================================================= */

/** rtl_cache_create()
 */
rtl_cache_type *
SAL_CALL rtl_cache_create (
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
) SAL_THROW_EXTERN_C()
{
    rtl_cache_type * result = 0;
    sal_Size         size   = sizeof(rtl_cache_type);

try_alloc:
    result = static_cast<rtl_cache_type*>(rtl_arena_alloc (gp_cache_arena, &size));
    if (result != 0)
    {
        rtl_cache_type * cache = result;
        (void) rtl_cache_constructor (cache);

        if (source == 0)
        {
            /* use default arena */
            assert(gp_default_arena != 0);
            source = gp_default_arena;
        }

        result = rtl_cache_activate (
            cache,
            name,
            objsize,
            objalign,
            constructor,
            destructor,
            reclaim,
            userarg,
            source,
            flags
        );

        if (result == 0)
        {
            /* activation failed */
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            rtl_arena_free (gp_cache_arena, cache, size);
        }
    }
    else if (gp_cache_arena == 0)
    {
        ensureCacheSingleton();
        if (gp_cache_arena)
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}
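
/* Added usage sketch (illustrative only; 'my_record' is a hypothetical
 * client type):
 *
 *   rtl_cache_type * c = rtl_cache_create (
 *       "my_record_cache", sizeof(my_record), 0, // name, objsize, objalign
 *       0, 0, 0, 0,  // constructor, destructor, reclaim, userarg
 *       0, 0);       // source (default arena), flags
 *   void * p = rtl_cache_alloc (c);
 *   // ... use p as a my_record buffer ...
 *   rtl_cache_free (c, p);
 *   rtl_cache_destroy (c);
 */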

/** rtl_cache_destroy()
 */
void SAL_CALL rtl_cache_destroy (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    if (cache != 0)
    {
        rtl_cache_deactivate (cache);
        rtl_cache_destructor (cache);
        rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
    }
}

/** rtl_cache_alloc()
 */
void *
SAL_CALL rtl_cache_alloc (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    void * obj = 0;

    if (cache == 0)
        return (0);

    if (alloc_mode == AMode_SYSTEM)
    {
        obj = rtl_allocateMemory(cache->m_type_size);
        if ((obj != 0) && (cache->m_constructor != 0))
        {
            if (!((cache->m_constructor)(obj, cache->m_userarg)))
            {
                /* construction failure */
                rtl_freeMemory(obj), obj = 0;
            }
        }
        return obj;
    }

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
    if (cache->m_cpu_curr != 0)
    {
        for (;;)
        {
            /* take object from magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used > 0))
            {
                obj = curr->m_objects[--curr->m_mag_used];
                cache->m_cpu_stats.m_alloc += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return (obj);
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used > 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_alloc (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            /* no full magazine: fall through to slab layer */
            break;
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

    /* alloc buffer from slab layer */
    obj = rtl_cache_slab_alloc (cache);
    if ((obj != 0) && (cache->m_constructor != 0))
    {
        /* construct object */
        if (!((cache->m_constructor)(obj, cache->m_userarg)))
        {
            /* construction failure */
            rtl_cache_slab_free (cache, obj), obj = 0;
        }
    }
    return (obj);
}

/** rtl_cache_free()
 */
void
SAL_CALL rtl_cache_free (
    rtl_cache_type * cache,
    void *           obj
) SAL_THROW_EXTERN_C()
{
    if ((obj != 0) && (cache != 0))
    {
        if (alloc_mode == AMode_SYSTEM)
        {
            if (cache->m_destructor != 0)
            {
                /* destruct object */
                (cache->m_destructor)(obj, cache->m_userarg);
            }
            rtl_freeMemory(obj);
            return;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        for (;;)
        {
            /* return object to magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
            {
                curr->m_objects[curr->m_mag_used++] = obj;
                cache->m_cpu_stats.m_free += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return;
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used == 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_free (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            if (rtl_cache_depot_populate(cache))
            {
                continue;
            }

            /* no empty magazine: fall through to slab layer */
            break;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

        /* no space for constructed object in magazine layer */
        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}
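
/* Added commentary: rtl_cache_alloc() and rtl_cache_free() are mirror
 * images: try m_cpu_curr first, swap with m_cpu_prev if that magazine is
 * usable, then trade a magazine with the depot, and only fall through to
 * the slab layer (with construction or destruction) when no suitable
 * magazine can be found.
 */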

/* ================================================================= *
 *
 * cache wsupdate (machdep) internals.
 *
 * ================================================================= */

/** rtl_cache_wsupdate_init()
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init();

/** rtl_cache_wsupdate_wait()
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (unsigned int seconds);

/** rtl_cache_wsupdate_fini()
 */
static void
rtl_cache_wsupdate_fini();

/* ================================================================= */

#if defined(SAL_UNX)

void SAL_CALL
rtl_secureZeroMemory (void *Ptr, sal_Size Bytes) SAL_THROW_EXTERN_C()
{
    //currently glibc doesn't implement memset_s
    volatile char *p = static_cast<volatile char*>(Ptr);
    while (Bytes--)
        *p++ = 0;
}

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
    if (pthread_create (
            &(g_cache_list.m_update_thread), NULL,
            rtl_cache_wsupdate_all, reinterpret_cast<void*>(10)) != 0)
    {
        /* failure */
        g_cache_list.m_update_thread = (pthread_t)(0);
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        struct timeval  now;
        struct timespec wakeup;

        gettimeofday(&now, 0);
        wakeup.tv_sec  = now.tv_sec + (seconds);
        wakeup.tv_nsec = now.tv_usec * 1000;

        (void) pthread_cond_timedwait (
            &(g_cache_list.m_update_cond),
            &(g_cache_list.m_lock),
            &wakeup);
    }
}

static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    pthread_cond_signal (&(g_cache_list.m_update_cond));
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    if (g_cache_list.m_update_thread != (pthread_t)(0))
        pthread_join (g_cache_list.m_update_thread, NULL);
}

/* ================================================================= */

#elif defined(SAL_W32)

void SAL_CALL
rtl_secureZeroMemory (void *Ptr, sal_Size Bytes) SAL_THROW_EXTERN_C()
{
    RtlSecureZeroMemory(Ptr, Bytes);
}

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init()
{
    DWORD dwThreadId;

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

    g_cache_list.m_update_thread =
        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    }
}

static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    SetEvent (g_cache_list.m_update_cond);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}

#endif /* SAL_UNX || SAL_W32 */

/* ================================================================= */

/** rtl_cache_depot_wsupdate()
 *  update depot stats and purge excess magazines.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
    rtl_cache_type *       cache,
    rtl_cache_depot_type * depot
)
{
    sal_Size npurge;

    depot->m_prev_min = depot->m_curr_min;
    depot->m_curr_min = depot->m_mag_count;

    npurge = depot->m_curr_min < depot->m_prev_min ? depot->m_curr_min : depot->m_prev_min;
    for (; npurge > 0; npurge--)
    {
        rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
        if (mag != 0)
        {
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (cache->m_magazine_cache, mag);
            RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        }
    }
    // coverity[missing_unlock]
}
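
/* Added commentary: m_curr_min tracks the depot's low-water mark of
 * magazine count since the last update. Magazines that stayed enqueued
 * even at the low-water mark of the previous interval were not part of
 * the recent working set, so that many (the smaller of the last two
 * minima) are cleared and freed.
 */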

/** rtl_cache_wsupdate()
 *
 *  @precond cache->m_depot_lock released
 */
static void
rtl_cache_wsupdate (
    rtl_cache_type * cache
)
{
    if (cache->m_magazine_cache != 0)
    {
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        // SAL_INFO(
        //  "sal.rtl",
        //  "rtl_cache_wsupdate(" << cache->m_name
        //      << ") [depot: count, curr_min, prev_min] full: "
        //      << cache->m_depot_full.m_mag_count << ", "
        //      << cache->m_depot_full.m_curr_min << ", "
        //      << cache->m_depot_full.m_prev_min << "; empty: "
        //      << cache->m_depot_empty.m_mag_count << ", "
        //      << cache->m_depot_empty.m_curr_min << ", "
        //      << cache->m_depot_empty.m_prev_min);

        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
    }
}

/** rtl_cache_wsupdate_all()
 */
#if defined(SAL_UNX)
static void *
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
    osl::Thread::setName("rtl_cache_wsupdate_all");
    unsigned int seconds = sal::static_int_cast< unsigned int >(
        reinterpret_cast< sal_uIntPtr >(arg));

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    while (!g_cache_list.m_update_done)
    {
        rtl_cache_wsupdate_wait (seconds);
        if (!g_cache_list.m_update_done)
        {
            rtl_cache_type * head, * cache;

            head = &(g_cache_list.m_cache_head);
            for (cache = head->m_cache_next;
                 cache != head;
                 cache = cache->m_cache_next)
            {
                rtl_cache_wsupdate (cache);
            }
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    return (0);
}

/* ================================================================= *
 *
 * cache initialization.
 *
 * ================================================================= */

void
rtl_cache_init()
{
    {
        /* list of caches */
        RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
        (void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
    }
    {
        /* cache: internal arena */
        assert(gp_cache_arena == 0);

        gp_cache_arena = rtl_arena_create (
            "rtl_cache_internal_arena",
            64,   /* quantum */
            0,    /* no quantum caching */
            NULL, /* default source */
            rtl_arena_alloc,
            rtl_arena_free,
            0     /* flags: none */
        );
        assert(gp_cache_arena != 0);

        /* check 'gp_default_arena' initialization */
        assert(gp_default_arena != 0);
    }
    {
        /* cache: magazine cache */
        static rtl_cache_type g_cache_magazine_cache;

        assert(gp_cache_magazine_cache == 0);
        (void) rtl_cache_constructor (&g_cache_magazine_cache);

        gp_cache_magazine_cache = rtl_cache_activate (
            &g_cache_magazine_cache,
            "rtl_cache_magazine_cache",
            sizeof(rtl_cache_magazine_type), /* objsize  */
            0,                               /* objalign */
            rtl_cache_magazine_constructor,
            rtl_cache_magazine_destructor,
            0, /* reclaim */
            0, /* userarg: NYI */
            gp_default_arena, /* source */
            RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
        );
        assert(gp_cache_magazine_cache != 0);

        /* activate magazine layer */
        g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
    }
    {
        /* cache: slab (struct) cache */
        static rtl_cache_type g_cache_slab_cache;

        assert(gp_cache_slab_cache == 0);
        (void) rtl_cache_constructor (&g_cache_slab_cache);

        gp_cache_slab_cache = rtl_cache_activate (
            &g_cache_slab_cache,
            "rtl_cache_slab_cache",
            sizeof(rtl_cache_slab_type), /* objsize  */
            0,                           /* objalign */
            rtl_cache_slab_constructor,
            rtl_cache_slab_destructor,
            0, /* reclaim */
            0, /* userarg: none */
            gp_default_arena, /* source */
            0  /* flags: none */
        );
        assert(gp_cache_slab_cache != 0);
    }
    {
        /* cache: bufctl cache */
        static rtl_cache_type g_cache_bufctl_cache;

        assert(gp_cache_bufctl_cache == 0);
        (void) rtl_cache_constructor (&g_cache_bufctl_cache);

        gp_cache_bufctl_cache = rtl_cache_activate (
            &g_cache_bufctl_cache,
            "rtl_cache_bufctl_cache",
            sizeof(rtl_cache_bufctl_type), /* objsize */
            0, /* objalign */
            0, /* constructor */
            0, /* destructor */
            0, /* reclaim */
            0, /* userarg */
            gp_default_arena, /* source */
            0  /* flags: none */
        );
        assert(gp_cache_bufctl_cache != 0);
    }

    rtl_cache_wsupdate_init();
    // SAL_INFO("sal.rtl", "rtl_cache_init completed");
}

/* ================================================================= */

void
rtl_cache_fini()
{
    if (gp_cache_arena != 0)
    {
        rtl_cache_type * cache, * head;

        rtl_cache_wsupdate_fini();

        if (gp_cache_bufctl_cache != 0)
        {
            cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_slab_cache != 0)
        {
            cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_magazine_cache != 0)
        {
            cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_arena != 0)
        {
            rtl_arena_destroy (gp_cache_arena);
            gp_cache_arena = 0;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        head = &(g_cache_list.m_cache_head);
        for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
        {
            // SAL_INFO(
            //  "sal.rtl",
            //  "rtl_cache_fini(" << cache->m_name << ") [slab]: allocs: "
            //      << cache->m_slab_stats.m_alloc << ", frees: "
            //      << cache->m_slab_stats.m_free << "; total: "
            //      << cache->m_slab_stats.m_mem_total << ", used: "
            //      << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
            //      << cache->m_cpu_stats.m_alloc << ", frees: "
            //      << cache->m_cpu_stats.m_free << "; [total]: allocs: "
            //      << (cache->m_slab_stats.m_alloc
            //          + cache->m_cpu_stats.m_alloc)
            //      << ", frees: "
            //      << (cache->m_slab_stats.m_free
            //          + cache->m_cpu_stats.m_free));
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_cache_fini completed");
}

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */