/*	$NetBSD: subr_pool.c,v 1.180 2010/01/03 01:07:19 mlelstv Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.180 2010/01/03 01:07:19 mlelstv Exp $");

#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitops.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/atomic.h>
/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
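/*
 * Minimal usage sketch (illustrative only; the "foo" names are
 * hypothetical and not part of this file).  A subsystem declares a
 * pool, initializes it once, and then gets and puts fixed-size items:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */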
/* List of all pools */
static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

/* Pool of subpages for use by normal pools. */
static struct pool psppool;

static SLIST_HEAD(, pool_allocator) pa_deferinitq =
	SLIST_HEAD_INITIALIZER(pa_deferinitq);
static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta,
	.pa_backingmapptr = &kmem_map,
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;
/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)
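/*
 * Worked example of the bitmap indexing used by the PR_NOTOUCH code
 * below: with 32-bit bitmap words, item index 37 lives in word
 * 37 / BITMAP_SIZE == 1, at bit 37 & BITMAP_MASK == 5, i.e. mask 1 << 5.
 */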
struct pool_item_header {
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#define	PI_MAGIC 0xdeaddeadU
#endif
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)
/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
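/*
 * Minimal pool_cache usage sketch (illustrative only; the "foo" names
 * and the constructor/destructor are hypothetical):
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 */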
static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
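/*
 * The pcg_dummy trick, in brief: a CPU's cc_current/cc_previous point at
 * pcg_dummy whenever no group is installed.  Because pcg_dummy is
 * zero-initialized (pcg_avail == 0 and pcg_size == 0), both fast-path
 * tests used later fail without needing a separate NULL check:
 *
 *	if (pcg->pcg_avail > 0)			(get: any object cached?)
 *	if (pcg->pcg_avail < pcg->pcg_size)	(put: any free slot?)
 *
 * and control falls through to the slow path.
 */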
static bool	pool_cache_put_slow(pool_cache_cpu_t *, int, void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
			void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_xcall(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
	struct pool_item_header *);
/*
 * Pool log entry. An array of these is allocated in pool_init().
 */

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#define	POOL_LOGSIZE	10

int pool_logsize = POOL_LOGSIZE;
static void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	if (pp->pr_log == NULL) {
		if (kmem_map != NULL)
			pp->pr_log = malloc(
				pool_logsize * sizeof(struct pool_log),
				M_TEMP, M_NOWAIT | M_ZERO);
		if (pp->pr_log == NULL)
			return;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	n = pp->pr_curlogentry;
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}
static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if (pp->pr_log == NULL)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}
static void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */
static inline unsigned int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

	KASSERT((*bitmap & mask) == 0);
	*bitmap |= mask;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1 << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static void
pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}
static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * we consider pool_item_header with smaller ph_page bigger.
	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
	 */
	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
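/*
 * Worked example of the reversed ordering (a sketch with made-up
 * addresses): with page headers for pages 0x1000 and 0x3000 and an item
 * address v == 0x3458, the exact SPLAY_FIND on v fails, and
 * pr_find_pagehead_noalign() below steps to the neighbouring node until
 * phtree_compare(&tmp, ph) < 0, i.e. until ph->ph_page < v.  It therefore
 * returns the 0x3000 header, the page that contains v.
 */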
static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}
447 * Return the pool page header based on item address.
449 static inline struct pool_item_header
*
450 pr_find_pagehead(struct pool
*pp
, void *v
)
452 struct pool_item_header
*ph
, tmp
;
454 if ((pp
->pr_roflags
& PR_NOALIGN
) != 0) {
455 ph
= pr_find_pagehead_noalign(pp
, v
);
458 (void *)((uintptr_t)v
& pp
->pr_alloc
->pa_pagemask
);
460 if ((pp
->pr_roflags
& PR_PHINPAGE
) != 0) {
461 ph
= (struct pool_item_header
*)((char *)page
+ pp
->pr_phoffset
);
464 ph
= SPLAY_FIND(phtree
, &pp
->pr_phtree
, &tmp
);
468 KASSERT(ph
== NULL
|| ((pp
->pr_roflags
& PR_PHINPAGE
) != 0) ||
469 ((char *)ph
->ph_page
<= (char *)v
&&
470 (char *)v
< (char *)ph
->ph_page
+ pp
->pr_alloc
->pa_pagesz
));
475 pr_pagelist_free(struct pool
*pp
, struct pool_pagelist
*pq
)
477 struct pool_item_header
*ph
;
479 while ((ph
= LIST_FIRST(pq
)) != NULL
) {
480 LIST_REMOVE(ph
, ph_pagelist
);
481 pool_allocator_free(pp
, ph
->ph_page
);
482 if ((pp
->pr_roflags
& PR_PHINPAGE
) == 0)
483 pool_put(pp
->pr_phpool
, ph
);
488 * Remove a page from the pool.
491 pr_rmpage(struct pool
*pp
, struct pool_item_header
*ph
,
492 struct pool_pagelist
*pq
)
495 KASSERT(mutex_owned(&pp
->pr_lock
));
498 * If the page was idle, decrement the idle page count.
500 if (ph
->ph_nmissing
== 0) {
502 if (pp
->pr_nidle
== 0)
503 panic("pr_rmpage: nidle inconsistent");
504 if (pp
->pr_nitems
< pp
->pr_itemsperpage
)
505 panic("pr_rmpage: nitems inconsistent");
510 pp
->pr_nitems
-= pp
->pr_itemsperpage
;
513 * Unlink the page from the pool and queue it for release.
515 LIST_REMOVE(ph
, ph_pagelist
);
516 if ((pp
->pr_roflags
& PR_PHINPAGE
) == 0)
517 SPLAY_REMOVE(phtree
, &pp
->pr_phtree
, ph
);
518 LIST_INSERT_HEAD(pq
, ph
, ph_pagelist
);
523 pool_update_curpage(pp
);
527 pa_starved_p(struct pool_allocator
*pa
)
530 if (pa
->pa_backingmap
!= NULL
) {
531 return vm_map_starved_p(pa
->pa_backingmap
);
537 pool_reclaim_callback(struct callback_entry
*ce
, void *obj
, void *arg
)
539 struct pool
*pp
= obj
;
540 struct pool_allocator
*pa
= pp
->pr_alloc
;
542 KASSERT(&pp
->pr_reclaimerentry
== ce
);
544 if (!pa_starved_p(pa
)) {
545 return CALLBACK_CHAIN_ABORT
;
547 return CALLBACK_CHAIN_CONTINUE
;
551 pool_reclaim_register(struct pool
*pp
)
553 struct vm_map
*map
= pp
->pr_alloc
->pa_backingmap
;
560 s
= splvm(); /* not necessary for INTRSAFE maps, but don't care. */
561 callback_register(&vm_map_to_kernel(map
)->vmk_reclaim_callback
,
562 &pp
->pr_reclaimerentry
, pp
, pool_reclaim_callback
);
567 pool_reclaim_unregister(struct pool
*pp
)
569 struct vm_map
*map
= pp
->pr_alloc
->pa_backingmap
;
576 s
= splvm(); /* not necessary for INTRSAFE maps, but don't care. */
577 callback_unregister(&vm_map_to_kernel(map
)->vmk_reclaim_callback
,
578 &pp
->pr_reclaimerentry
);
583 pa_reclaim_register(struct pool_allocator
*pa
)
585 struct vm_map
*map
= *pa
->pa_backingmapptr
;
588 KASSERT(pa
->pa_backingmap
== NULL
);
590 SLIST_INSERT_HEAD(&pa_deferinitq
, pa
, pa_q
);
593 pa
->pa_backingmap
= map
;
594 TAILQ_FOREACH(pp
, &pa
->pa_list
, pr_alloc_list
) {
595 pool_reclaim_register(pp
);
600 * Initialize all the pools listed in the "pools" link set.
603 pool_subsystem_init(void)
605 struct pool_allocator
*pa
;
607 mutex_init(&pool_head_lock
, MUTEX_DEFAULT
, IPL_NONE
);
608 mutex_init(&pool_allocator_lock
, MUTEX_DEFAULT
, IPL_NONE
);
609 cv_init(&pool_busy
, "poolbusy");
611 while ((pa
= SLIST_FIRST(&pa_deferinitq
)) != NULL
) {
612 KASSERT(pa
->pa_backingmapptr
!= NULL
);
613 KASSERT(*pa
->pa_backingmapptr
!= NULL
);
614 SLIST_REMOVE_HEAD(&pa_deferinitq
, pa_q
);
615 pa_reclaim_register(pa
);
618 pool_init(&cache_pool
, sizeof(struct pool_cache
), coherency_unit
,
619 0, 0, "pcache", &pool_allocator_nointr
, IPL_NONE
);
621 pool_init(&cache_cpu_pool
, sizeof(pool_cache_cpu_t
), coherency_unit
,
622 0, 0, "pcachecpu", &pool_allocator_nointr
, IPL_NONE
);
626 * Initialize the given pool resource structure.
628 * We export this routine to allow other kernel parts to declare
629 * static pools that must be initialized before malloc() is available.
632 pool_init(struct pool
*pp
, size_t size
, u_int align
, u_int ioff
, int flags
,
633 const char *wchan
, struct pool_allocator
*palloc
, int ipl
)
636 size_t trysize
, phsize
;
641 * Check that the pool hasn't already been initialised and
642 * added to the list of all pools.
644 TAILQ_FOREACH(pp1
, &pool_head
, pr_poollist
) {
646 panic("pool_init: pool %s already initialised",
651 #ifdef POOL_DIAGNOSTIC
653 * Always log if POOL_DIAGNOSTIC is defined.
655 if (pool_logsize
!= 0)
660 palloc
= &pool_allocator_kmem
;
662 if (size
> palloc
->pa_pagesz
) {
663 if (palloc
== &pool_allocator_kmem
)
664 palloc
= &pool_allocator_kmem_fullpage
;
665 else if (palloc
== &pool_allocator_nointr
)
666 palloc
= &pool_allocator_nointr_fullpage
;
668 #endif /* POOL_SUBPAGE */
670 mutex_enter(&pool_allocator_lock
);
671 if (palloc
->pa_refcnt
++ == 0) {
672 if (palloc
->pa_pagesz
== 0)
673 palloc
->pa_pagesz
= PAGE_SIZE
;
675 TAILQ_INIT(&palloc
->pa_list
);
677 mutex_init(&palloc
->pa_lock
, MUTEX_DEFAULT
, IPL_VM
);
678 palloc
->pa_pagemask
= ~(palloc
->pa_pagesz
- 1);
679 palloc
->pa_pageshift
= ffs(palloc
->pa_pagesz
) - 1;
681 if (palloc
->pa_backingmapptr
!= NULL
) {
682 pa_reclaim_register(palloc
);
686 mutex_exit(&pool_allocator_lock
);
691 if ((flags
& PR_NOTOUCH
) == 0 && size
< sizeof(struct pool_item
))
692 size
= sizeof(struct pool_item
);
694 size
= roundup(size
, align
);
696 if (size
> palloc
->pa_pagesz
)
697 panic("pool_init: pool item size (%zu) too large", size
);
701 * Initialize the pool structure.
703 LIST_INIT(&pp
->pr_emptypages
);
704 LIST_INIT(&pp
->pr_fullpages
);
705 LIST_INIT(&pp
->pr_partpages
);
707 pp
->pr_curpage
= NULL
;
711 pp
->pr_maxpages
= UINT_MAX
;
712 pp
->pr_roflags
= flags
;
715 pp
->pr_align
= align
;
716 pp
->pr_wchan
= wchan
;
717 pp
->pr_alloc
= palloc
;
720 pp
->pr_hardlimit
= UINT_MAX
;
721 pp
->pr_hardlimit_warning
= NULL
;
722 pp
->pr_hardlimit_ratecap
.tv_sec
= 0;
723 pp
->pr_hardlimit_ratecap
.tv_usec
= 0;
724 pp
->pr_hardlimit_warning_last
.tv_sec
= 0;
725 pp
->pr_hardlimit_warning_last
.tv_usec
= 0;
726 pp
->pr_drain_hook
= NULL
;
727 pp
->pr_drain_hook_arg
= NULL
;
728 pp
->pr_freecheck
= NULL
;
731 * Decide whether to put the page header off page to avoid
732 * wasting too large a part of the page or too big item.
733 * Off-page page headers go on a hash table, so we can match
734 * a returned item with its header based on the page address.
735 * We use 1/16 of the page size and about 8 times of the item
736 * size as the threshold (XXX: tune)
738 * However, we'll put the header into the page if we can put
739 * it without wasting any items.
741 * Silently enforce `0 <= ioff < align'.
743 pp
->pr_itemoffset
= ioff
%= align
;
744 /* See the comment below about reserved bytes. */
745 trysize
= palloc
->pa_pagesz
- ((align
- ioff
) % align
);
746 phsize
= ALIGN(sizeof(struct pool_item_header
));
747 if ((pp
->pr_roflags
& (PR_NOTOUCH
| PR_NOALIGN
)) == 0 &&
748 (pp
->pr_size
< MIN(palloc
->pa_pagesz
/ 16, phsize
<< 3) ||
749 trysize
/ pp
->pr_size
== (trysize
- phsize
) / pp
->pr_size
)) {
750 /* Use the end of the page for the page header */
751 pp
->pr_roflags
|= PR_PHINPAGE
;
752 pp
->pr_phoffset
= off
= palloc
->pa_pagesz
- phsize
;
754 /* The page header will be taken from our page header pool */
756 off
= palloc
->pa_pagesz
;
757 SPLAY_INIT(&pp
->pr_phtree
);
761 * Alignment is to take place at `ioff' within the item. This means
762 * we must reserve up to `align - 1' bytes on the page to allow
763 * appropriate positioning of each item.
765 pp
->pr_itemsperpage
= (off
- ((align
- ioff
) % align
)) / pp
->pr_size
;
766 KASSERT(pp
->pr_itemsperpage
!= 0);
767 if ((pp
->pr_roflags
& PR_NOTOUCH
)) {
770 for (idx
= 0; pp
->pr_itemsperpage
> PHPOOL_FREELIST_NELEM(idx
);
774 if (idx
>= PHPOOL_MAX
) {
776 * if you see this panic, consider to tweak
777 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
779 panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
780 pp
->pr_wchan
, pp
->pr_itemsperpage
);
782 pp
->pr_phpool
= &phpool
[idx
];
783 } else if ((pp
->pr_roflags
& PR_PHINPAGE
) == 0) {
784 pp
->pr_phpool
= &phpool
[0];
786 #if defined(DIAGNOSTIC)
788 pp
->pr_phpool
= NULL
;
793 * Use the slack between the chunks and the page header
794 * for "cache coloring".
796 slack
= off
- pp
->pr_itemsperpage
* pp
->pr_size
;
797 pp
->pr_maxcolor
= (slack
/ align
) * align
;
803 pp
->pr_npagealloc
= 0;
804 pp
->pr_npagefree
= 0;
811 pp
->pr_entered_file
= NULL
;
812 pp
->pr_entered_line
= 0;
814 mutex_init(&pp
->pr_lock
, MUTEX_DEFAULT
, ipl
);
815 cv_init(&pp
->pr_cv
, wchan
);
819 * Initialize private page header pool and cache magazine pool if we
820 * haven't done so yet.
823 if (phpool
[0].pr_size
== 0) {
825 for (idx
= 0; idx
< PHPOOL_MAX
; idx
++) {
826 static char phpool_names
[PHPOOL_MAX
][6+1+6+1];
830 nelem
= PHPOOL_FREELIST_NELEM(idx
);
831 snprintf(phpool_names
[idx
], sizeof(phpool_names
[idx
]),
833 sz
= sizeof(struct pool_item_header
);
835 sz
= offsetof(struct pool_item_header
,
836 ph_bitmap
[howmany(nelem
, BITMAP_SIZE
)]);
838 pool_init(&phpool
[idx
], sz
, 0, 0, 0,
839 phpool_names
[idx
], &pool_allocator_meta
, IPL_VM
);
842 pool_init(&psppool
, POOL_SUBPAGE
, POOL_SUBPAGE
, 0,
843 PR_RECURSIVE
, "psppool", &pool_allocator_meta
, IPL_VM
);
846 size
= sizeof(pcg_t
) +
847 (PCG_NOBJECTS_NORMAL
- 1) * sizeof(pcgpair_t
);
848 pool_init(&pcg_normal_pool
, size
, coherency_unit
, 0, 0,
849 "pcgnormal", &pool_allocator_meta
, IPL_VM
);
851 size
= sizeof(pcg_t
) +
852 (PCG_NOBJECTS_LARGE
- 1) * sizeof(pcgpair_t
);
853 pool_init(&pcg_large_pool
, size
, coherency_unit
, 0, 0,
854 "pcglarge", &pool_allocator_meta
, IPL_VM
);
857 /* Insert into the list of all pools. */
859 mutex_enter(&pool_head_lock
);
860 TAILQ_FOREACH(pp1
, &pool_head
, pr_poollist
) {
861 if (strcmp(pp1
->pr_wchan
, pp
->pr_wchan
) > 0)
865 TAILQ_INSERT_TAIL(&pool_head
, pp
, pr_poollist
);
867 TAILQ_INSERT_BEFORE(pp1
, pp
, pr_poollist
);
869 mutex_exit(&pool_head_lock
);
871 /* Insert this into the list of pools using this allocator. */
873 mutex_enter(&palloc
->pa_lock
);
874 TAILQ_INSERT_TAIL(&palloc
->pa_list
, pp
, pr_alloc_list
);
876 mutex_exit(&palloc
->pa_lock
);
878 pool_reclaim_register(pp
);
882 * De-commision a pool resource.
885 pool_destroy(struct pool
*pp
)
887 struct pool_pagelist pq
;
888 struct pool_item_header
*ph
;
890 /* Remove from global pool list */
891 mutex_enter(&pool_head_lock
);
892 while (pp
->pr_refcnt
!= 0)
893 cv_wait(&pool_busy
, &pool_head_lock
);
894 TAILQ_REMOVE(&pool_head
, pp
, pr_poollist
);
897 mutex_exit(&pool_head_lock
);
899 /* Remove this pool from its allocator's list of pools. */
900 pool_reclaim_unregister(pp
);
901 mutex_enter(&pp
->pr_alloc
->pa_lock
);
902 TAILQ_REMOVE(&pp
->pr_alloc
->pa_list
, pp
, pr_alloc_list
);
903 mutex_exit(&pp
->pr_alloc
->pa_lock
);
905 mutex_enter(&pool_allocator_lock
);
906 if (--pp
->pr_alloc
->pa_refcnt
== 0)
907 mutex_destroy(&pp
->pr_alloc
->pa_lock
);
908 mutex_exit(&pool_allocator_lock
);
910 mutex_enter(&pp
->pr_lock
);
912 KASSERT(pp
->pr_cache
== NULL
);
915 if (pp
->pr_nout
!= 0) {
916 pr_printlog(pp
, NULL
, printf
);
917 panic("pool_destroy: pool busy: still out: %u",
922 KASSERT(LIST_EMPTY(&pp
->pr_fullpages
));
923 KASSERT(LIST_EMPTY(&pp
->pr_partpages
));
925 /* Remove all pages */
927 while ((ph
= LIST_FIRST(&pp
->pr_emptypages
)) != NULL
)
928 pr_rmpage(pp
, ph
, &pq
);
930 mutex_exit(&pp
->pr_lock
);
932 pr_pagelist_free(pp
, &pq
);
934 #ifdef POOL_DIAGNOSTIC
935 if (pp
->pr_log
!= NULL
) {
936 free(pp
->pr_log
, M_TEMP
);
941 cv_destroy(&pp
->pr_cv
);
942 mutex_destroy(&pp
->pr_lock
);
946 pool_set_drain_hook(struct pool
*pp
, void (*fn
)(void *, int), void *arg
)
949 /* XXX no locking -- must be used just after pool_init() */
951 if (pp
->pr_drain_hook
!= NULL
)
952 panic("pool_set_drain_hook(%s): already set", pp
->pr_wchan
);
954 pp
->pr_drain_hook
= fn
;
955 pp
->pr_drain_hook_arg
= arg
;
958 static struct pool_item_header
*
959 pool_alloc_item_header(struct pool
*pp
, void *storage
, int flags
)
961 struct pool_item_header
*ph
;
963 if ((pp
->pr_roflags
& PR_PHINPAGE
) != 0)
964 ph
= (struct pool_item_header
*) ((char *)storage
+ pp
->pr_phoffset
);
966 ph
= pool_get(pp
->pr_phpool
, flags
);
972 * Grab an item from the pool.
975 #ifdef POOL_DIAGNOSTIC
976 _pool_get(struct pool
*pp
, int flags
, const char *file
, long line
)
978 pool_get(struct pool
*pp
, int flags
)
981 struct pool_item
*pi
;
982 struct pool_item_header
*ph
;
986 if (__predict_false(pp
->pr_itemsperpage
== 0))
987 panic("pool_get: pool %p: pr_itemsperpage is zero, "
988 "pool not initialized?", pp
);
989 if (__predict_false(curlwp
== NULL
&& doing_shutdown
== 0 &&
990 (flags
& PR_WAITOK
) != 0))
991 panic("pool_get: %s: must have NOWAIT", pp
->pr_wchan
);
993 #endif /* DIAGNOSTIC */
995 if (flags
& PR_WAITOK
) {
1000 mutex_enter(&pp
->pr_lock
);
1001 pr_enter(pp
, file
, line
);
1005 * Check to see if we've reached the hard limit. If we have,
1006 * and we can wait, then wait until an item has been returned to
1010 if (__predict_false(pp
->pr_nout
> pp
->pr_hardlimit
)) {
1012 mutex_exit(&pp
->pr_lock
);
1013 panic("pool_get: %s: crossed hard limit", pp
->pr_wchan
);
1016 if (__predict_false(pp
->pr_nout
== pp
->pr_hardlimit
)) {
1017 if (pp
->pr_drain_hook
!= NULL
) {
1019 * Since the drain hook is going to free things
1020 * back to the pool, unlock, call the hook, re-lock,
1021 * and check the hardlimit condition again.
1024 mutex_exit(&pp
->pr_lock
);
1025 (*pp
->pr_drain_hook
)(pp
->pr_drain_hook_arg
, flags
);
1026 mutex_enter(&pp
->pr_lock
);
1027 pr_enter(pp
, file
, line
);
1028 if (pp
->pr_nout
< pp
->pr_hardlimit
)
1032 if ((flags
& PR_WAITOK
) && !(flags
& PR_LIMITFAIL
)) {
1034 * XXX: A warning isn't logged in this case. Should
1037 pp
->pr_flags
|= PR_WANTED
;
1039 cv_wait(&pp
->pr_cv
, &pp
->pr_lock
);
1040 pr_enter(pp
, file
, line
);
1045 * Log a message that the hard limit has been hit.
1047 if (pp
->pr_hardlimit_warning
!= NULL
&&
1048 ratecheck(&pp
->pr_hardlimit_warning_last
,
1049 &pp
->pr_hardlimit_ratecap
))
1050 log(LOG_ERR
, "%s\n", pp
->pr_hardlimit_warning
);
1055 mutex_exit(&pp
->pr_lock
);
1060 * The convention we use is that if `curpage' is not NULL, then
1061 * it points at a non-empty bucket. In particular, `curpage'
1062 * never points at a page header which has PR_PHINPAGE set and
1063 * has no items in its bucket.
1065 if ((ph
= pp
->pr_curpage
) == NULL
) {
1069 if (pp
->pr_nitems
!= 0) {
1070 mutex_exit(&pp
->pr_lock
);
1071 printf("pool_get: %s: curpage NULL, nitems %u\n",
1072 pp
->pr_wchan
, pp
->pr_nitems
);
1073 panic("pool_get: nitems inconsistent");
1078 * Call the back-end page allocator for more memory.
1079 * Release the pool lock, as the back-end page allocator
1083 error
= pool_grow(pp
, flags
);
1084 pr_enter(pp
, file
, line
);
1087 * We were unable to allocate a page or item
1088 * header, but we released the lock during
1089 * allocation, so perhaps items were freed
1090 * back to the pool. Check for this case.
1092 if (pp
->pr_curpage
!= NULL
)
1097 mutex_exit(&pp
->pr_lock
);
1101 /* Start the allocation process over. */
1104 if (pp
->pr_roflags
& PR_NOTOUCH
) {
1106 if (__predict_false(ph
->ph_nmissing
== pp
->pr_itemsperpage
)) {
1108 mutex_exit(&pp
->pr_lock
);
1109 panic("pool_get: %s: page empty", pp
->pr_wchan
);
1112 v
= pr_item_notouch_get(pp
, ph
);
1113 #ifdef POOL_DIAGNOSTIC
1114 pr_log(pp
, v
, PRLOG_GET
, file
, line
);
1117 v
= pi
= LIST_FIRST(&ph
->ph_itemlist
);
1118 if (__predict_false(v
== NULL
)) {
1120 mutex_exit(&pp
->pr_lock
);
1121 panic("pool_get: %s: page empty", pp
->pr_wchan
);
1124 if (__predict_false(pp
->pr_nitems
== 0)) {
1126 mutex_exit(&pp
->pr_lock
);
1127 printf("pool_get: %s: items on itemlist, nitems %u\n",
1128 pp
->pr_wchan
, pp
->pr_nitems
);
1129 panic("pool_get: nitems inconsistent");
1133 #ifdef POOL_DIAGNOSTIC
1134 pr_log(pp
, v
, PRLOG_GET
, file
, line
);
1138 if (__predict_false(pi
->pi_magic
!= PI_MAGIC
)) {
1139 pr_printlog(pp
, pi
, printf
);
1140 panic("pool_get(%s): free list modified: "
1141 "magic=%x; page %p; item addr %p\n",
1142 pp
->pr_wchan
, pi
->pi_magic
, ph
->ph_page
, pi
);
1147 * Remove from item list.
1149 LIST_REMOVE(pi
, pi_list
);
1153 if (ph
->ph_nmissing
== 0) {
1155 if (__predict_false(pp
->pr_nidle
== 0))
1156 panic("pool_get: nidle inconsistent");
1161 * This page was previously empty. Move it to the list of
1162 * partially-full pages. This page is already curpage.
1164 LIST_REMOVE(ph
, ph_pagelist
);
1165 LIST_INSERT_HEAD(&pp
->pr_partpages
, ph
, ph_pagelist
);
1168 if (ph
->ph_nmissing
== pp
->pr_itemsperpage
) {
1170 if (__predict_false((pp
->pr_roflags
& PR_NOTOUCH
) == 0 &&
1171 !LIST_EMPTY(&ph
->ph_itemlist
))) {
1173 mutex_exit(&pp
->pr_lock
);
1174 panic("pool_get: %s: nmissing inconsistent",
1179 * This page is now full. Move it to the full list
1180 * and select a new current page.
1182 LIST_REMOVE(ph
, ph_pagelist
);
1183 LIST_INSERT_HEAD(&pp
->pr_fullpages
, ph
, ph_pagelist
);
1184 pool_update_curpage(pp
);
1191 * If we have a low water mark and we are now below that low
1192 * water mark, add more items to the pool.
1194 if (POOL_NEEDS_CATCHUP(pp
) && pool_catchup(pp
) != 0) {
1196 * XXX: Should we log a warning? Should we set up a timeout
1197 * to try again in a second or so? The latter could break
1198 * a caller's assumptions about interrupt protection, etc.
1202 mutex_exit(&pp
->pr_lock
);
1203 KASSERT((((vaddr_t
)v
+ pp
->pr_itemoffset
) & (pp
->pr_align
- 1)) == 0);
1204 FREECHECK_OUT(&pp
->pr_freecheck
, v
);
1209 * Internal version of pool_put(). Pool is already locked/entered.
1212 pool_do_put(struct pool
*pp
, void *v
, struct pool_pagelist
*pq
)
1214 struct pool_item
*pi
= v
;
1215 struct pool_item_header
*ph
;
1217 KASSERT(mutex_owned(&pp
->pr_lock
));
1218 FREECHECK_IN(&pp
->pr_freecheck
, v
);
1219 LOCKDEBUG_MEM_CHECK(v
, pp
->pr_size
);
1222 if (__predict_false(pp
->pr_nout
== 0)) {
1223 printf("pool %s: putting with none out\n",
1229 if (__predict_false((ph
= pr_find_pagehead(pp
, v
)) == NULL
)) {
1230 pr_printlog(pp
, NULL
, printf
);
1231 panic("pool_put: %s: page header missing", pp
->pr_wchan
);
1235 * Return to item list.
1237 if (pp
->pr_roflags
& PR_NOTOUCH
) {
1238 pr_item_notouch_put(pp
, ph
, v
);
1241 pi
->pi_magic
= PI_MAGIC
;
1247 for (i
= 0; i
< pp
->pr_size
/ sizeof(int); i
++) {
1253 LIST_INSERT_HEAD(&ph
->ph_itemlist
, pi
, pi_list
);
1255 KDASSERT(ph
->ph_nmissing
!= 0);
1261 /* Cancel "pool empty" condition if it exists */
1262 if (pp
->pr_curpage
== NULL
)
1263 pp
->pr_curpage
= ph
;
1265 if (pp
->pr_flags
& PR_WANTED
) {
1266 pp
->pr_flags
&= ~PR_WANTED
;
1267 cv_broadcast(&pp
->pr_cv
);
1271 * If this page is now empty, do one of two things:
1273 * (1) If we have more pages than the page high water mark,
1274 * free the page back to the system. ONLY CONSIDER
1275 * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1278 * (2) Otherwise, move the page to the empty page list.
1280 * Either way, select a new current page (so we use a partially-full
1281 * page if one is available).
1283 if (ph
->ph_nmissing
== 0) {
1285 if (pp
->pr_npages
> pp
->pr_minpages
&&
1286 pp
->pr_npages
> pp
->pr_maxpages
) {
1287 pr_rmpage(pp
, ph
, pq
);
1289 LIST_REMOVE(ph
, ph_pagelist
);
1290 LIST_INSERT_HEAD(&pp
->pr_emptypages
, ph
, ph_pagelist
);
1293 * Update the timestamp on the page. A page must
1294 * be idle for some period of time before it can
1295 * be reclaimed by the pagedaemon. This minimizes
1296 * ping-pong'ing for memory.
1298 * note for 64-bit time_t: truncating to 32-bit is not
1299 * a problem for our usage.
1301 ph
->ph_time
= time_uptime
;
1303 pool_update_curpage(pp
);
1307 * If the page was previously completely full, move it to the
1308 * partially-full list and make it the current page. The next
1309 * allocation will get the item from this page, instead of
1310 * further fragmenting the pool.
1312 else if (ph
->ph_nmissing
== (pp
->pr_itemsperpage
- 1)) {
1313 LIST_REMOVE(ph
, ph_pagelist
);
1314 LIST_INSERT_HEAD(&pp
->pr_partpages
, ph
, ph_pagelist
);
1315 pp
->pr_curpage
= ph
;
1320 * Return resource to the pool.
1322 #ifdef POOL_DIAGNOSTIC
1324 _pool_put(struct pool
*pp
, void *v
, const char *file
, long line
)
1326 struct pool_pagelist pq
;
1330 mutex_enter(&pp
->pr_lock
);
1331 pr_enter(pp
, file
, line
);
1333 pr_log(pp
, v
, PRLOG_PUT
, file
, line
);
1335 pool_do_put(pp
, v
, &pq
);
1338 mutex_exit(&pp
->pr_lock
);
1340 pr_pagelist_free(pp
, &pq
);
1343 #endif /* POOL_DIAGNOSTIC */
1346 pool_put(struct pool
*pp
, void *v
)
1348 struct pool_pagelist pq
;
1352 mutex_enter(&pp
->pr_lock
);
1353 pool_do_put(pp
, v
, &pq
);
1354 mutex_exit(&pp
->pr_lock
);
1356 pr_pagelist_free(pp
, &pq
);
1359 #ifdef POOL_DIAGNOSTIC
1360 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1364 * pool_grow: grow a pool by a page.
1366 * => called with pool locked.
1367 * => unlock and relock the pool.
1368 * => return with pool locked.
1372 pool_grow(struct pool
*pp
, int flags
)
1374 struct pool_item_header
*ph
= NULL
;
1377 mutex_exit(&pp
->pr_lock
);
1378 cp
= pool_allocator_alloc(pp
, flags
);
1379 if (__predict_true(cp
!= NULL
)) {
1380 ph
= pool_alloc_item_header(pp
, cp
, flags
);
1382 if (__predict_false(cp
== NULL
|| ph
== NULL
)) {
1384 pool_allocator_free(pp
, cp
);
1386 mutex_enter(&pp
->pr_lock
);
1390 mutex_enter(&pp
->pr_lock
);
1391 pool_prime_page(pp
, cp
, ph
);
1392 pp
->pr_npagealloc
++;
1397 * Add N items to the pool.
1400 pool_prime(struct pool
*pp
, int n
)
1405 mutex_enter(&pp
->pr_lock
);
1407 newpages
= roundup(n
, pp
->pr_itemsperpage
) / pp
->pr_itemsperpage
;
1409 while (newpages
-- > 0) {
1410 error
= pool_grow(pp
, PR_NOWAIT
);
1417 if (pp
->pr_minpages
>= pp
->pr_maxpages
)
1418 pp
->pr_maxpages
= pp
->pr_minpages
+ 1; /* XXX */
1420 mutex_exit(&pp
->pr_lock
);
1425 * Add a page worth of items to the pool.
1427 * Note, we must be called with the pool descriptor LOCKED.
1430 pool_prime_page(struct pool
*pp
, void *storage
, struct pool_item_header
*ph
)
1432 struct pool_item
*pi
;
1434 const unsigned int align
= pp
->pr_align
;
1435 const unsigned int ioff
= pp
->pr_itemoffset
;
1438 KASSERT(mutex_owned(&pp
->pr_lock
));
1441 if ((pp
->pr_roflags
& PR_NOALIGN
) == 0 &&
1442 ((uintptr_t)cp
& (pp
->pr_alloc
->pa_pagesz
- 1)) != 0)
1443 panic("pool_prime_page: %s: unaligned page", pp
->pr_wchan
);
1447 * Insert page header.
1449 LIST_INSERT_HEAD(&pp
->pr_emptypages
, ph
, ph_pagelist
);
1450 LIST_INIT(&ph
->ph_itemlist
);
1451 ph
->ph_page
= storage
;
1452 ph
->ph_nmissing
= 0;
1453 ph
->ph_time
= time_uptime
;
1454 if ((pp
->pr_roflags
& PR_PHINPAGE
) == 0)
1455 SPLAY_INSERT(phtree
, &pp
->pr_phtree
, ph
);
1462 ph
->ph_off
= pp
->pr_curcolor
;
1463 cp
= (char *)cp
+ ph
->ph_off
;
1464 if ((pp
->pr_curcolor
+= align
) > pp
->pr_maxcolor
)
1465 pp
->pr_curcolor
= 0;
1468 * Adjust storage to apply aligment to `pr_itemoffset' in each item.
1471 cp
= (char *)cp
+ align
- ioff
;
1473 KASSERT((((vaddr_t
)cp
+ ioff
) & (align
- 1)) == 0);
1476 * Insert remaining chunks on the bucket list.
1478 n
= pp
->pr_itemsperpage
;
1481 if (pp
->pr_roflags
& PR_NOTOUCH
) {
1482 pr_item_notouch_init(pp
, ph
);
1485 pi
= (struct pool_item
*)cp
;
1487 KASSERT(((((vaddr_t
)pi
) + ioff
) & (align
- 1)) == 0);
1489 /* Insert on page list */
1490 LIST_INSERT_HEAD(&ph
->ph_itemlist
, pi
, pi_list
);
1492 pi
->pi_magic
= PI_MAGIC
;
1494 cp
= (char *)cp
+ pp
->pr_size
;
1496 KASSERT((((vaddr_t
)cp
+ ioff
) & (align
- 1)) == 0);
1501 * If the pool was depleted, point at the new page.
1503 if (pp
->pr_curpage
== NULL
)
1504 pp
->pr_curpage
= ph
;
1506 if (++pp
->pr_npages
> pp
->pr_hiwat
)
1507 pp
->pr_hiwat
= pp
->pr_npages
;
1511 * Used by pool_get() when nitems drops below the low water mark. This
1512 * is used to catch up pr_nitems with the low water mark.
1514 * Note 1, we never wait for memory here, we let the caller decide what to do.
1516 * Note 2, we must be called with the pool already locked, and we return
1520 pool_catchup(struct pool
*pp
)
1524 while (POOL_NEEDS_CATCHUP(pp
)) {
1525 error
= pool_grow(pp
, PR_NOWAIT
);
1534 pool_update_curpage(struct pool
*pp
)
1537 pp
->pr_curpage
= LIST_FIRST(&pp
->pr_partpages
);
1538 if (pp
->pr_curpage
== NULL
) {
1539 pp
->pr_curpage
= LIST_FIRST(&pp
->pr_emptypages
);
1541 KASSERT((pp
->pr_curpage
== NULL
&& pp
->pr_nitems
== 0) ||
1542 (pp
->pr_curpage
!= NULL
&& pp
->pr_nitems
> 0));
1546 pool_setlowat(struct pool
*pp
, int n
)
1549 mutex_enter(&pp
->pr_lock
);
1551 pp
->pr_minitems
= n
;
1552 pp
->pr_minpages
= (n
== 0)
1554 : roundup(n
, pp
->pr_itemsperpage
) / pp
->pr_itemsperpage
;
1556 /* Make sure we're caught up with the newly-set low water mark. */
1557 if (POOL_NEEDS_CATCHUP(pp
) && pool_catchup(pp
) != 0) {
1559 * XXX: Should we log a warning? Should we set up a timeout
1560 * to try again in a second or so? The latter could break
1561 * a caller's assumptions about interrupt protection, etc.
1565 mutex_exit(&pp
->pr_lock
);
1569 pool_sethiwat(struct pool
*pp
, int n
)
1572 mutex_enter(&pp
->pr_lock
);
1574 pp
->pr_maxpages
= (n
== 0)
1576 : roundup(n
, pp
->pr_itemsperpage
) / pp
->pr_itemsperpage
;
1578 mutex_exit(&pp
->pr_lock
);
1582 pool_sethardlimit(struct pool
*pp
, int n
, const char *warnmess
, int ratecap
)
1585 mutex_enter(&pp
->pr_lock
);
1587 pp
->pr_hardlimit
= n
;
1588 pp
->pr_hardlimit_warning
= warnmess
;
1589 pp
->pr_hardlimit_ratecap
.tv_sec
= ratecap
;
1590 pp
->pr_hardlimit_warning_last
.tv_sec
= 0;
1591 pp
->pr_hardlimit_warning_last
.tv_usec
= 0;
1594 * In-line version of pool_sethiwat(), because we don't want to
1597 pp
->pr_maxpages
= (n
== 0)
1599 : roundup(n
, pp
->pr_itemsperpage
) / pp
->pr_itemsperpage
;
1601 mutex_exit(&pp
->pr_lock
);
1605 * Release all complete pages that have not been used recently.
1608 #ifdef POOL_DIAGNOSTIC
1609 _pool_reclaim(struct pool
*pp
, const char *file
, long line
)
1611 pool_reclaim(struct pool
*pp
)
1614 struct pool_item_header
*ph
, *phnext
;
1615 struct pool_pagelist pq
;
1620 if (pp
->pr_drain_hook
!= NULL
) {
1622 * The drain hook must be called with the pool unlocked.
1624 (*pp
->pr_drain_hook
)(pp
->pr_drain_hook_arg
, PR_NOWAIT
);
1628 * XXXSMP Because we do not want to cause non-MPSAFE code
1631 if (pp
->pr_ipl
== IPL_SOFTNET
|| pp
->pr_ipl
== IPL_SOFTCLOCK
||
1632 pp
->pr_ipl
== IPL_SOFTSERIAL
) {
1633 KERNEL_LOCK(1, NULL
);
1638 /* Reclaim items from the pool's cache (if any). */
1639 if (pp
->pr_cache
!= NULL
)
1640 pool_cache_invalidate(pp
->pr_cache
);
1642 if (mutex_tryenter(&pp
->pr_lock
) == 0) {
1644 KERNEL_UNLOCK_ONE(NULL
);
1648 pr_enter(pp
, file
, line
);
1652 curtime
= time_uptime
;
1654 for (ph
= LIST_FIRST(&pp
->pr_emptypages
); ph
!= NULL
; ph
= phnext
) {
1655 phnext
= LIST_NEXT(ph
, ph_pagelist
);
1657 /* Check our minimum page claim */
1658 if (pp
->pr_npages
<= pp
->pr_minpages
)
1661 KASSERT(ph
->ph_nmissing
== 0);
1662 if (curtime
- ph
->ph_time
< pool_inactive_time
1663 && !pa_starved_p(pp
->pr_alloc
))
1667 * If freeing this page would put us below
1668 * the low water mark, stop now.
1670 if ((pp
->pr_nitems
- pp
->pr_itemsperpage
) <
1674 pr_rmpage(pp
, ph
, &pq
);
1678 mutex_exit(&pp
->pr_lock
);
1680 if (LIST_EMPTY(&pq
))
1683 pr_pagelist_free(pp
, &pq
);
1688 KERNEL_UNLOCK_ONE(NULL
);
1695 * Drain pools, one at a time. This is a two stage process;
1696 * drain_start kicks off a cross call to drain CPU-level caches
1697 * if the pool has an associated pool_cache. drain_end waits
1698 * for those cross calls to finish, and then drains the cache
1699 * (if any) and pool.
1701 * Note, must never be called from interrupt context.
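 *
 * Illustrative call sequence (a sketch; the caller is hypothetical and
 * error handling is omitted), from thread context only:
 *
 *	struct pool *pp;
 *	uint64_t where;
 *
 *	pool_drain_start(&pp, &where);
 *	pool_drain_end(pp, where);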
1704 pool_drain_start(struct pool
**ppp
, uint64_t *wp
)
1708 KASSERT(!TAILQ_EMPTY(&pool_head
));
1712 /* Find next pool to drain, and add a reference. */
1713 mutex_enter(&pool_head_lock
);
1715 if (drainpp
== NULL
) {
1716 drainpp
= TAILQ_FIRST(&pool_head
);
1718 if (drainpp
!= NULL
) {
1720 drainpp
= TAILQ_NEXT(pp
, pr_poollist
);
1723 * Skip completely idle pools. We depend on at least
1724 * one pool in the system being active.
1726 } while (pp
== NULL
|| pp
->pr_npages
== 0);
1728 mutex_exit(&pool_head_lock
);
1730 /* If there is a pool_cache, drain CPU level caches. */
1732 if (pp
->pr_cache
!= NULL
) {
1733 *wp
= xc_broadcast(0, (xcfunc_t
)pool_cache_xcall
,
1734 pp
->pr_cache
, NULL
);
1739 pool_drain_end(struct pool
*pp
, uint64_t where
)
1745 KASSERT(pp
->pr_refcnt
> 0);
1747 /* Wait for remote draining to complete. */
1748 if (pp
->pr_cache
!= NULL
)
1751 /* Drain the cache (if any) and pool.. */
1754 /* Finally, unlock the pool. */
1755 mutex_enter(&pool_head_lock
);
1757 cv_broadcast(&pool_busy
);
1758 mutex_exit(&pool_head_lock
);
1762 * Diagnostic helpers.
1765 pool_print(struct pool
*pp
, const char *modif
)
1768 pool_print1(pp
, modif
, printf
);
1772 pool_printall(const char *modif
, void (*pr
)(const char *, ...))
1776 TAILQ_FOREACH(pp
, &pool_head
, pr_poollist
) {
1777 pool_printit(pp
, modif
, pr
);
1782 pool_printit(struct pool
*pp
, const char *modif
, void (*pr
)(const char *, ...))
1786 (*pr
)("Must specify a pool to print.\n");
1790 pool_print1(pp
, modif
, pr
);
1794 pool_print_pagelist(struct pool
*pp
, struct pool_pagelist
*pl
,
1795 void (*pr
)(const char *, ...))
1797 struct pool_item_header
*ph
;
1799 struct pool_item
*pi
;
1802 LIST_FOREACH(ph
, pl
, ph_pagelist
) {
1803 (*pr
)("\t\tpage %p, nmissing %d, time %" PRIu32
"\n",
1804 ph
->ph_page
, ph
->ph_nmissing
, ph
->ph_time
);
1806 if (!(pp
->pr_roflags
& PR_NOTOUCH
)) {
1807 LIST_FOREACH(pi
, &ph
->ph_itemlist
, pi_list
) {
1808 if (pi
->pi_magic
!= PI_MAGIC
) {
1809 (*pr
)("\t\t\titem %p, magic 0x%x\n",
1819 pool_print1(struct pool
*pp
, const char *modif
, void (*pr
)(const char *, ...))
1821 struct pool_item_header
*ph
;
1824 pool_cache_cpu_t
*cc
;
1825 uint64_t cpuhit
, cpumiss
;
1826 int i
, print_log
= 0, print_pagelist
= 0, print_cache
= 0;
1829 while ((c
= *modif
++) != '\0') {
1838 if ((pc
= pp
->pr_cache
) != NULL
) {
1839 (*pr
)("POOL CACHE");
1844 (*pr
)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1845 pp
->pr_wchan
, pp
->pr_size
, pp
->pr_align
, pp
->pr_itemoffset
,
1847 (*pr
)("\talloc %p\n", pp
->pr_alloc
);
1848 (*pr
)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1849 pp
->pr_minitems
, pp
->pr_minpages
, pp
->pr_maxpages
, pp
->pr_npages
);
1850 (*pr
)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1851 pp
->pr_itemsperpage
, pp
->pr_nitems
, pp
->pr_nout
, pp
->pr_hardlimit
);
1853 (*pr
)("\tnget %lu, nfail %lu, nput %lu\n",
1854 pp
->pr_nget
, pp
->pr_nfail
, pp
->pr_nput
);
1855 (*pr
)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1856 pp
->pr_npagealloc
, pp
->pr_npagefree
, pp
->pr_hiwat
, pp
->pr_nidle
);
1858 if (print_pagelist
== 0)
1861 if ((ph
= LIST_FIRST(&pp
->pr_emptypages
)) != NULL
)
1862 (*pr
)("\n\tempty page list:\n");
1863 pool_print_pagelist(pp
, &pp
->pr_emptypages
, pr
);
1864 if ((ph
= LIST_FIRST(&pp
->pr_fullpages
)) != NULL
)
1865 (*pr
)("\n\tfull page list:\n");
1866 pool_print_pagelist(pp
, &pp
->pr_fullpages
, pr
);
1867 if ((ph
= LIST_FIRST(&pp
->pr_partpages
)) != NULL
)
1868 (*pr
)("\n\tpartial-page list:\n");
1869 pool_print_pagelist(pp
, &pp
->pr_partpages
, pr
);
1871 if (pp
->pr_curpage
== NULL
)
1872 (*pr
)("\tno current page\n");
1874 (*pr
)("\tcurpage %p\n", pp
->pr_curpage
->ph_page
);
1881 if ((pp
->pr_roflags
& PR_LOGGING
) == 0)
1882 (*pr
)("\tno log\n");
1884 pr_printlog(pp
, NULL
, pr
);
1889 #define PR_GROUPLIST(pcg) \
1890 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1891 for (i = 0; i < pcg->pcg_size; i++) { \
1892 if (pcg->pcg_objects[i].pcgo_pa != \
1893 POOL_PADDR_INVALID) { \
1894 (*pr)("\t\t\t%p, 0x%llx\n", \
1895 pcg->pcg_objects[i].pcgo_va, \
1896 (unsigned long long) \
1897 pcg->pcg_objects[i].pcgo_pa); \
1899 (*pr)("\t\t\t%p\n", \
1900 pcg->pcg_objects[i].pcgo_va); \
1907 for (i
= 0; i
< MAXCPUS
; i
++) {
1908 if ((cc
= pc
->pc_cpus
[i
]) == NULL
)
1910 cpuhit
+= cc
->cc_hits
;
1911 cpumiss
+= cc
->cc_misses
;
1913 (*pr
)("\tcpu layer hits %llu misses %llu\n", cpuhit
, cpumiss
);
1914 (*pr
)("\tcache layer hits %llu misses %llu\n",
1915 pc
->pc_hits
, pc
->pc_misses
);
1916 (*pr
)("\tcache layer entry uncontended %llu contended %llu\n",
1917 pc
->pc_hits
+ pc
->pc_misses
- pc
->pc_contended
,
1919 (*pr
)("\tcache layer empty groups %u full groups %u\n",
1920 pc
->pc_nempty
, pc
->pc_nfull
);
1922 (*pr
)("\tfull cache groups:\n");
1923 for (pcg
= pc
->pc_fullgroups
; pcg
!= NULL
;
1924 pcg
= pcg
->pcg_next
) {
1927 (*pr
)("\tempty cache groups:\n");
1928 for (pcg
= pc
->pc_emptygroups
; pcg
!= NULL
;
1929 pcg
= pcg
->pcg_next
) {
1936 pr_enter_check(pp
, pr
);
1940 pool_chk_page(struct pool
*pp
, const char *label
, struct pool_item_header
*ph
)
1942 struct pool_item
*pi
;
1946 if ((pp
->pr_roflags
& PR_NOALIGN
) == 0) {
1947 page
= (void *)((uintptr_t)ph
& pp
->pr_alloc
->pa_pagemask
);
1948 if (page
!= ph
->ph_page
&&
1949 (pp
->pr_roflags
& PR_PHINPAGE
) != 0) {
1951 printf("%s: ", label
);
1952 printf("pool(%p:%s): page inconsistency: page %p;"
1953 " at page head addr %p (p %p)\n", pp
,
1954 pp
->pr_wchan
, ph
->ph_page
,
1960 if ((pp
->pr_roflags
& PR_NOTOUCH
) != 0)
1963 for (pi
= LIST_FIRST(&ph
->ph_itemlist
), n
= 0;
1965 pi
= LIST_NEXT(pi
,pi_list
), n
++) {
1968 if (pi
->pi_magic
!= PI_MAGIC
) {
1970 printf("%s: ", label
);
1971 printf("pool(%s): free list modified: magic=%x;"
1972 " page %p; item ordinal %d; addr %p\n",
1973 pp
->pr_wchan
, pi
->pi_magic
, ph
->ph_page
,
1978 if ((pp
->pr_roflags
& PR_NOALIGN
) != 0) {
1981 page
= (void *)((uintptr_t)pi
& pp
->pr_alloc
->pa_pagemask
);
1982 if (page
== ph
->ph_page
)
1986 printf("%s: ", label
);
1987 printf("pool(%p:%s): page inconsistency: page %p;"
1988 " item ordinal %d; addr %p (p %p)\n", pp
,
1989 pp
->pr_wchan
, ph
->ph_page
,
1998 pool_chk(struct pool
*pp
, const char *label
)
2000 struct pool_item_header
*ph
;
2003 mutex_enter(&pp
->pr_lock
);
2004 LIST_FOREACH(ph
, &pp
->pr_emptypages
, ph_pagelist
) {
2005 r
= pool_chk_page(pp
, label
, ph
);
2010 LIST_FOREACH(ph
, &pp
->pr_fullpages
, ph_pagelist
) {
2011 r
= pool_chk_page(pp
, label
, ph
);
2016 LIST_FOREACH(ph
, &pp
->pr_partpages
, ph_pagelist
) {
2017 r
= pool_chk_page(pp
, label
, ph
);
2024 mutex_exit(&pp
->pr_lock
);
2031 * Initialize a pool cache.
2034 pool_cache_init(size_t size
, u_int align
, u_int align_offset
, u_int flags
,
2035 const char *wchan
, struct pool_allocator
*palloc
, int ipl
,
2036 int (*ctor
)(void *, void *, int), void (*dtor
)(void *, void *), void *arg
)
2040 pc
= pool_get(&cache_pool
, PR_WAITOK
);
2044 pool_cache_bootstrap(pc
, size
, align
, align_offset
, flags
, wchan
,
2045 palloc
, ipl
, ctor
, dtor
, arg
);
2051 * pool_cache_bootstrap:
2053 * Kernel-private version of pool_cache_init(). The caller
2054 * provides initial storage.
2057 pool_cache_bootstrap(pool_cache_t pc
, size_t size
, u_int align
,
2058 u_int align_offset
, u_int flags
, const char *wchan
,
2059 struct pool_allocator
*palloc
, int ipl
,
2060 int (*ctor
)(void *, void *, int), void (*dtor
)(void *, void *),
2063 CPU_INFO_ITERATOR cii
;
2065 struct cpu_info
*ci
;
2069 if (palloc
== NULL
&& ipl
== IPL_NONE
)
2070 palloc
= &pool_allocator_nointr
;
2071 pool_init(pp
, size
, align
, align_offset
, flags
, wchan
, palloc
, ipl
);
2072 mutex_init(&pc
->pc_lock
, MUTEX_DEFAULT
, ipl
);
2075 ctor
= (int (*)(void *, void *, int))nullop
;
2078 dtor
= (void (*)(void *, void *))nullop
;
2081 pc
->pc_emptygroups
= NULL
;
2082 pc
->pc_fullgroups
= NULL
;
2083 pc
->pc_partgroups
= NULL
;
2092 pc
->pc_contended
= 0;
2094 pc
->pc_freecheck
= NULL
;
2096 if ((flags
& PR_LARGECACHE
) != 0) {
2097 pc
->pc_pcgsize
= PCG_NOBJECTS_LARGE
;
2098 pc
->pc_pcgpool
= &pcg_large_pool
;
2100 pc
->pc_pcgsize
= PCG_NOBJECTS_NORMAL
;
2101 pc
->pc_pcgpool
= &pcg_normal_pool
;
2104 /* Allocate per-CPU caches. */
2105 memset(pc
->pc_cpus
, 0, sizeof(pc
->pc_cpus
));
2108 /* XXX For sparc: boot CPU is not attached yet. */
2109 pool_cache_cpu_init1(curcpu(), pc
);
2111 for (CPU_INFO_FOREACH(cii
, ci
)) {
2112 pool_cache_cpu_init1(ci
, pc
);
2116 /* Add to list of all pools. */
2117 if (__predict_true(!cold
))
2118 mutex_enter(&pool_head_lock
);
2119 TAILQ_FOREACH(pc1
, &pool_cache_head
, pc_cachelist
) {
2120 if (strcmp(pc1
->pc_pool
.pr_wchan
, pc
->pc_pool
.pr_wchan
) > 0)
2124 TAILQ_INSERT_TAIL(&pool_cache_head
, pc
, pc_cachelist
);
2126 TAILQ_INSERT_BEFORE(pc1
, pc
, pc_cachelist
);
2127 if (__predict_true(!cold
))
2128 mutex_exit(&pool_head_lock
);
2135 * pool_cache_destroy:
2137 * Destroy a pool cache.
2140 pool_cache_destroy(pool_cache_t pc
)
2142 struct pool
*pp
= &pc
->pc_pool
;
2145 /* Remove it from the global list. */
2146 mutex_enter(&pool_head_lock
);
2147 while (pc
->pc_refcnt
!= 0)
2148 cv_wait(&pool_busy
, &pool_head_lock
);
2149 TAILQ_REMOVE(&pool_cache_head
, pc
, pc_cachelist
);
2150 mutex_exit(&pool_head_lock
);
2152 /* First, invalidate the entire cache. */
2153 pool_cache_invalidate(pc
);
2155 /* Disassociate it from the pool. */
2156 mutex_enter(&pp
->pr_lock
);
2157 pp
->pr_cache
= NULL
;
2158 mutex_exit(&pp
->pr_lock
);
2160 /* Destroy per-CPU data */
2161 for (i
= 0; i
< MAXCPUS
; i
++)
2162 pool_cache_invalidate_cpu(pc
, i
);
2164 /* Finally, destroy it. */
2165 mutex_destroy(&pc
->pc_lock
);
2167 pool_put(&cache_pool
, pc
);
2171 * pool_cache_cpu_init1:
2173 * Called for each pool_cache whenever a new CPU is attached.
2176 pool_cache_cpu_init1(struct cpu_info
*ci
, pool_cache_t pc
)
2178 pool_cache_cpu_t
*cc
;
2181 index
= ci
->ci_index
;
2183 KASSERT(index
< MAXCPUS
);
2185 if ((cc
= pc
->pc_cpus
[index
]) != NULL
) {
2186 KASSERT(cc
->cc_cpuindex
== index
);
2191 * The first CPU is 'free'. This needs to be the case for
2192 * bootstrap - we may not be able to allocate yet.
2194 if (pc
->pc_ncpu
== 0) {
2198 mutex_enter(&pc
->pc_lock
);
2200 mutex_exit(&pc
->pc_lock
);
2201 cc
= pool_get(&cache_cpu_pool
, PR_WAITOK
);
2204 cc
->cc_ipl
= pc
->pc_pool
.pr_ipl
;
2205 cc
->cc_iplcookie
= makeiplcookie(cc
->cc_ipl
);
2207 cc
->cc_cpuindex
= index
;
2210 cc
->cc_current
= __UNCONST(&pcg_dummy
);
2211 cc
->cc_previous
= __UNCONST(&pcg_dummy
);
2213 pc
->pc_cpus
[index
] = cc
;
2217 * pool_cache_cpu_init:
2219 * Called whenever a new CPU is attached.
2222 pool_cache_cpu_init(struct cpu_info
*ci
)
2226 mutex_enter(&pool_head_lock
);
2227 TAILQ_FOREACH(pc
, &pool_cache_head
, pc_cachelist
) {
2229 mutex_exit(&pool_head_lock
);
2231 pool_cache_cpu_init1(ci
, pc
);
2233 mutex_enter(&pool_head_lock
);
2235 cv_broadcast(&pool_busy
);
2237 mutex_exit(&pool_head_lock
);
2241 * pool_cache_reclaim:
2243 * Reclaim memory from a pool cache.
2246 pool_cache_reclaim(pool_cache_t pc
)
2249 return pool_reclaim(&pc
->pc_pool
);
2253 pool_cache_destruct_object1(pool_cache_t pc
, void *object
)
2256 (*pc
->pc_dtor
)(pc
->pc_arg
, object
);
2257 pool_put(&pc
->pc_pool
, object
);
2261 * pool_cache_destruct_object:
2263 * Force destruction of an object and its release back into
2267 pool_cache_destruct_object(pool_cache_t pc
, void *object
)
2270 FREECHECK_IN(&pc
->pc_freecheck
, object
);
2272 pool_cache_destruct_object1(pc
, object
);
2276 * pool_cache_invalidate_groups:
2278 * Invalidate a chain of groups and destruct all objects.
2281 pool_cache_invalidate_groups(pool_cache_t pc
, pcg_t
*pcg
)
2287 for (; pcg
!= NULL
; pcg
= next
) {
2288 next
= pcg
->pcg_next
;
2290 for (i
= 0; i
< pcg
->pcg_avail
; i
++) {
2291 object
= pcg
->pcg_objects
[i
].pcgo_va
;
2292 pool_cache_destruct_object1(pc
, object
);
2295 if (pcg
->pcg_size
== PCG_NOBJECTS_LARGE
) {
2296 pool_put(&pcg_large_pool
, pcg
);
2298 KASSERT(pcg
->pcg_size
== PCG_NOBJECTS_NORMAL
);
2299 pool_put(&pcg_normal_pool
, pcg
);
2305 * pool_cache_invalidate:
2307 * Invalidate a pool cache (destruct and release all of the
2308 * cached objects). Does not reclaim objects from the pool.
2310 * Note: For pool caches that provide constructed objects, there
2311 * is an assumption that another level of synchronization is occurring
2312 * between the input to the constructor and the cache invalidation.
2315 pool_cache_invalidate(pool_cache_t pc
)
2317 pcg_t
*full
, *empty
, *part
;
2320 if (ncpu
< 2 || !mp_online
) {
2322 * We might be called early enough in the boot process
2323 * for the CPU data structures to not be fully initialized.
2324 * In this case, simply gather the local CPU's cache now
2325 * since it will be the only one running.
2327 pool_cache_xcall(pc
);
2330 * Gather all of the CPU-specific caches into the
2333 where
= xc_broadcast(0, (xcfunc_t
)pool_cache_xcall
, pc
, NULL
);
2337 mutex_enter(&pc
->pc_lock
);
2338 full
= pc
->pc_fullgroups
;
2339 empty
= pc
->pc_emptygroups
;
2340 part
= pc
->pc_partgroups
;
2341 pc
->pc_fullgroups
= NULL
;
2342 pc
->pc_emptygroups
= NULL
;
2343 pc
->pc_partgroups
= NULL
;
2347 mutex_exit(&pc
->pc_lock
);
2349 pool_cache_invalidate_groups(pc
, full
);
2350 pool_cache_invalidate_groups(pc
, empty
);
2351 pool_cache_invalidate_groups(pc
, part
);
2355 * pool_cache_invalidate_cpu:
2357 * Invalidate all CPU-bound cached objects in pool cache, the CPU being
2358 * identified by its associated index.
2359 * It is caller's responsibility to ensure that no operation is
2360 * taking place on this pool cache while doing this invalidation.
2361 * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2362 * pool cached objects from a CPU different from the one currently running
2363 * may result in an undefined behaviour.
2366 pool_cache_invalidate_cpu(pool_cache_t pc
, u_int index
)
2369 pool_cache_cpu_t
*cc
;
2372 if ((cc
= pc
->pc_cpus
[index
]) == NULL
)
2375 if ((pcg
= cc
->cc_current
) != &pcg_dummy
) {
2376 pcg
->pcg_next
= NULL
;
2377 pool_cache_invalidate_groups(pc
, pcg
);
2379 if ((pcg
= cc
->cc_previous
) != &pcg_dummy
) {
2380 pcg
->pcg_next
= NULL
;
2381 pool_cache_invalidate_groups(pc
, pcg
);
2383 if (cc
!= &pc
->pc_cpu0
)
2384 pool_put(&cache_cpu_pool
, cc
);
2389 pool_cache_set_drain_hook(pool_cache_t pc
, void (*fn
)(void *, int), void *arg
)
2392 pool_set_drain_hook(&pc
->pc_pool
, fn
, arg
);
2396 pool_cache_setlowat(pool_cache_t pc
, int n
)
2399 pool_setlowat(&pc
->pc_pool
, n
);
2403 pool_cache_sethiwat(pool_cache_t pc
, int n
)
2406 pool_sethiwat(&pc
->pc_pool
, n
);
2410 pool_cache_sethardlimit(pool_cache_t pc
, int n
, const char *warnmess
, int ratecap
)
2413 pool_sethardlimit(&pc
->pc_pool
, n
, warnmess
, ratecap
);
2416 static bool __noinline
2417 pool_cache_get_slow(pool_cache_cpu_t
*cc
, int s
, void **objectp
,
2418 paddr_t
*pap
, int flags
)
2425 KASSERT(cc
->cc_current
->pcg_avail
== 0);
2426 KASSERT(cc
->cc_previous
->pcg_avail
== 0);
2432 * Nothing was available locally. Try and grab a group
2435 if (__predict_false(!mutex_tryenter(&pc
->pc_lock
))) {
2436 ncsw
= curlwp
->l_ncsw
;
2437 mutex_enter(&pc
->pc_lock
);
2441 * If we context switched while locking, then
2442 * our view of the per-CPU data is invalid:
2445 if (curlwp
->l_ncsw
!= ncsw
) {
2446 mutex_exit(&pc
->pc_lock
);
2451 if (__predict_true((pcg
= pc
->pc_fullgroups
) != NULL
)) {
2453 * If there's a full group, release our empty
2454 * group back to the cache. Install the full
2455 * group as cc_current and return.
2457 if (__predict_true((cur
= cc
->cc_current
) != &pcg_dummy
)) {
2458 KASSERT(cur
->pcg_avail
== 0);
2459 cur
->pcg_next
= pc
->pc_emptygroups
;
2460 pc
->pc_emptygroups
= cur
;
2463 KASSERT(pcg
->pcg_avail
== pcg
->pcg_size
);
2464 cc
->cc_current
= pcg
;
2465 pc
->pc_fullgroups
= pcg
->pcg_next
;
2468 mutex_exit(&pc
->pc_lock
);
2473 * Nothing available locally or in cache. Take the slow
2474 * path: fetch a new object from the pool and construct
2478 mutex_exit(&pc
->pc_lock
);
2481 object
= pool_get(&pc
->pc_pool
, flags
);
2483 if (__predict_false(object
== NULL
))
2486 if (__predict_false((*pc
->pc_ctor
)(pc
->pc_arg
, object
, flags
) != 0)) {
2487 pool_put(&pc
->pc_pool
, object
);
2492 KASSERT((((vaddr_t
)object
+ pc
->pc_pool
.pr_itemoffset
) &
2493 (pc
->pc_pool
.pr_align
- 1)) == 0);
2497 *pap
= POOL_VTOPHYS(object
);
2499 *pap
= POOL_PADDR_INVALID
;
2503 FREECHECK_OUT(&pc
->pc_freecheck
, object
);
2508 * pool_cache_get{,_paddr}:
2510 * Get an object from a pool cache (optionally returning
2511 * the physical address of the object).
void *
pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	void *object;
	int s;

	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	/* Lock out interrupts and disable preemption. */
	s = splvm();
	while (/* CONSTCOND */ true) {
		/* Try and allocate an object from the current group. */
		cc = pc->pc_cpus[curcpu()->ci_index];
		KASSERT(cc->cc_cache == pc);
		pcg = cc->cc_current;
		if (__predict_true(pcg->pcg_avail > 0)) {
			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
			if (__predict_false(pap != NULL))
				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
#if defined(DIAGNOSTIC)
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
			KASSERT(pcg->pcg_avail < pcg->pcg_size);
			KASSERT(object != NULL);
#endif
			cc->cc_hits++;
			splx(s);
			FREECHECK_OUT(&pc->pc_freecheck, object);
			return object;
		}

		/*
		 * That failed.  If the previous group isn't empty, swap
		 * it with the current group and allocate from there.
		 */
		pcg = cc->cc_previous;
		if (__predict_true(pcg->pcg_avail > 0)) {
			cc->cc_previous = cc->cc_current;
			cc->cc_current = pcg;
			continue;
		}

		/*
		 * Can't allocate from either group: try the slow path.
		 * If get_slow() allocated an object for us, or if
		 * no more objects are available, it will return false.
		 * Otherwise, we need to retry.
		 */
		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
			break;
	}

	return object;
}
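
/*
 * Illustrative sketch (not compiled): obtaining both the kernel VA and the
 * physical address of a cached object, e.g. for a hardware descriptor.
 * "desc_cache" and "struct example_desc" are hypothetical; the cache is
 * assumed to have been created with pool_cache_init().
 */
#if 0
static struct example_desc *
example_desc_alloc(pool_cache_t desc_cache, paddr_t *pap)
{

	/* May return NULL with PR_NOWAIT when no memory is available. */
	return pool_cache_get_paddr(desc_cache, PR_NOWAIT, pap);
}
#endif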

static bool __noinline
pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
{
	pcg_t *pcg, *cur;
	uint64_t ncsw;
	pool_cache_t pc;

	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);

	pc = cc->cc_cache;
	pcg = NULL;
	cc->cc_misses++;

	/*
	 * If there are no empty groups in the cache then allocate one
	 * while still unlocked.
	 */
	if (__predict_false(pc->pc_emptygroups == NULL)) {
		if (__predict_true(!pool_cache_disable)) {
			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
		}
		if (__predict_true(pcg != NULL)) {
			pcg->pcg_avail = 0;
			pcg->pcg_size = pc->pc_pcgsize;
		}
	}

	/* Lock the cache. */
	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
		ncsw = curlwp->l_ncsw;
		mutex_enter(&pc->pc_lock);
		pc->pc_contended++;

		/*
		 * If we context switched while locking, then our view of
		 * the per-CPU data is invalid: retry.
		 */
		if (__predict_false(curlwp->l_ncsw != ncsw)) {
			mutex_exit(&pc->pc_lock);
			if (pcg != NULL) {
				pool_put(pc->pc_pcgpool, pcg);
			}
			return true;
		}
	}

	/* If we didn't allocate an empty group, take one from the cache. */
	if (pcg == NULL && pc->pc_emptygroups != NULL) {
		pcg = pc->pc_emptygroups;
		pc->pc_emptygroups = pcg->pcg_next;
		pc->pc_nempty--;
	}

	/*
	 * If there's an empty group, release our full group back
	 * to the cache.  Install the empty group to the local CPU
	 * and return.
	 */
	if (pcg != NULL) {
		KASSERT(pcg->pcg_avail == 0);
		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
			cc->cc_previous = pcg;
		} else {
			cur = cc->cc_current;
			if (__predict_true(cur != &pcg_dummy)) {
				KASSERT(cur->pcg_avail == cur->pcg_size);
				cur->pcg_next = pc->pc_fullgroups;
				pc->pc_fullgroups = cur;
				pc->pc_nfull++;
			}
			cc->cc_current = pcg;
		}
		pc->pc_hits++;
		mutex_exit(&pc->pc_lock);
		return true;
	}

	/*
	 * Nothing available locally or in cache, and we didn't
	 * allocate an empty group.  Take the slow path and destroy
	 * the object here and now.
	 */
	pc->pc_misses++;
	mutex_exit(&pc->pc_lock);
	splx(s);
	pool_cache_destruct_object(pc, object);

	return false;
}

/*
 * pool_cache_put{,_paddr}:
 *
 *	Put an object back to the pool cache (optionally caching the
 *	physical address of the object).
 */
void
pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	int s;

	KASSERT(object != NULL);
	FREECHECK_IN(&pc->pc_freecheck, object);

	/* Lock out interrupts and disable preemption. */
	s = splvm();
	while (/* CONSTCOND */ true) {
		/* If the current group isn't full, release it there. */
		cc = pc->pc_cpus[curcpu()->ci_index];
		KASSERT(cc->cc_cache == pc);
		pcg = cc->cc_current;
		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
			pcg->pcg_avail++;
			cc->cc_hits++;
			splx(s);
			return;
		}

		/*
		 * That failed.  If the previous group isn't full, swap
		 * it with the current group and try again.
		 */
		pcg = cc->cc_previous;
		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
			cc->cc_previous = cc->cc_current;
			cc->cc_current = pcg;
			continue;
		}

		/*
		 * Can't free to either group: try the slow path.
		 * If put_slow() releases the object for us, it
		 * will return false.  Otherwise we need to retry.
		 */
		if (!pool_cache_put_slow(cc, s, object))
			break;
	}
}
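
/*
 * Illustrative sketch (not compiled): a typical pool_cache life cycle.
 * All "example_*" names, the constructor and the destructor are
 * hypothetical; the pool_cache_init() arguments are (size, align,
 * align_offset, flags, wchan, palloc, ipl, ctor, dtor, arg), with a NULL
 * allocator selecting the default.
 */
#if 0
struct example_obj {
	kmutex_t	eo_lock;
	int		eo_state;
};

static pool_cache_t example_cache;

static int
example_ctor(void *arg, void *obj, int flags)
{
	struct example_obj *eo = obj;

	mutex_init(&eo->eo_lock, MUTEX_DEFAULT, IPL_NONE);
	eo->eo_state = 0;
	return 0;
}

static void
example_dtor(void *arg, void *obj)
{
	struct example_obj *eo = obj;

	mutex_destroy(&eo->eo_lock);
}

static void
example_init(void)
{

	example_cache = pool_cache_init(sizeof(struct example_obj), 0, 0, 0,
	    "examplobj", NULL, IPL_NONE, example_ctor, example_dtor, NULL);
}

static void
example_use(void)
{
	struct example_obj *eo;

	if ((eo = pool_cache_get(example_cache, PR_NOWAIT)) == NULL)
		return;
	/* ... use the constructed object ... */
	pool_cache_put(example_cache, eo);
}
#endif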

/*
 * pool_cache_xcall:
 *
 *	Transfer objects from the per-CPU cache to the global cache.
 *	Run within a cross-call thread.
 */
static void
pool_cache_xcall(pool_cache_t pc)
{
	pool_cache_cpu_t *cc;
	pcg_t *prev, *cur, **list;
	int s;

	s = splvm();
	mutex_enter(&pc->pc_lock);
	cc = pc->pc_cpus[curcpu()->ci_index];
	cur = cc->cc_current;
	cc->cc_current = __UNCONST(&pcg_dummy);
	prev = cc->cc_previous;
	cc->cc_previous = __UNCONST(&pcg_dummy);
	if (cur != &pcg_dummy) {
		if (cur->pcg_avail == cur->pcg_size) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (cur->pcg_avail == 0) {
			list = &pc->pc_emptygroups;
			pc->pc_nempty++;
		} else {
			list = &pc->pc_partgroups;
			pc->pc_npart++;
		}
		cur->pcg_next = *list;
		*list = cur;
	}
	if (prev != &pcg_dummy) {
		if (prev->pcg_avail == prev->pcg_size) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (prev->pcg_avail == 0) {
			list = &pc->pc_emptygroups;
			pc->pc_nempty++;
		} else {
			list = &pc->pc_partgroups;
			pc->pc_npart++;
		}
		prev->pcg_next = *list;
		*list = prev;
	}
	mutex_exit(&pc->pc_lock);
	splx(s);
}
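
/*
 * Illustrative sketch (not compiled): running the transfer above on every
 * CPU with a low-priority cross-call broadcast and waiting for it to
 * complete, as the drain code does.  The wrapper name is hypothetical.
 */
#if 0
static void
example_transfer_all_cpus(pool_cache_t pc)
{
	uint64_t where;

	where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
	xc_wait(where);
}
#endif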

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *
 *	pool_allocator_kmem - the default when no allocator is specified
 *
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	in interrupt context.
 */

void	*pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_kmem_fullpage = {
	pool_page_alloc, pool_page_free, 0,
	.pa_backingmapptr = &kmem_map,
};
#else
struct pool_allocator pool_allocator_kmem = {
	pool_page_alloc, pool_page_free, 0,
	.pa_backingmapptr = &kmem_map,
};
#endif

void	*pool_page_alloc_nointr(struct pool *, int);
void	pool_page_free_nointr(struct pool *, void *);

#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_nointr_fullpage = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
	.pa_backingmapptr = &kernel_map,
};
#else
struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
	.pa_backingmapptr = &kernel_map,
};
#endif

#ifdef POOL_SUBPAGE
void	*pool_subpage_alloc(struct pool *, int);
void	pool_subpage_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
	.pa_backingmapptr = &kmem_map,
};

void	*pool_subpage_alloc_nointr(struct pool *, int);
void	pool_subpage_free_nointr(struct pool *, void *);

struct pool_allocator pool_allocator_nointr = {
	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
	.pa_backingmapptr = &kmem_map,
};
#endif /* POOL_SUBPAGE */
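
/*
 * Illustrative sketch (not compiled): initializing a pool with one of the
 * standard back-end allocators described above.  Passing NULL selects a
 * default allocator; pool_allocator_nointr suits pools never touched from
 * interrupt context.  The "example_*" names are hypothetical.
 */
#if 0
struct example_node {
	int	en_key;
	int	en_value;
};

static struct pool example_node_pool;

static void
example_pool_setup(void)
{

	pool_init(&example_node_pool, sizeof(struct example_node), 0, 0, 0,
	    "examplnd", &pool_allocator_nointr, IPL_NONE);
}
#endif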

static void *
pool_allocator_alloc(struct pool *pp, int flags)
{
	struct pool_allocator *pa = pp->pr_alloc;
	void *res;

	res = (*pa->pa_alloc)(pp, flags);
	if (res == NULL && (flags & PR_WAITOK) == 0) {
		/*
		 * We only run the drain hook here if PR_NOWAIT.
		 * In other cases, the hook will be run in
		 * pool_reclaim().
		 */
		if (pp->pr_drain_hook != NULL) {
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			res = (*pa->pa_alloc)(pp, flags);
		}
	}
	return res;
}
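
/*
 * Illustrative sketch (not compiled): registering the drain hook that
 * pool_allocator_alloc() may invoke when a PR_NOWAIT allocation fails.
 * pool_set_drain_hook() is the public interface used earlier in this file;
 * the hook, its argument and example_trim_caches() are hypothetical.
 */
#if 0
static void
example_drain(void *arg, int flags)
{
	struct example_softc *sc = arg;

	/* Release whatever cached resources "sc" can spare. */
	example_trim_caches(sc);
}

static void
example_register(struct example_softc *sc, struct pool *pp)
{

	pool_set_drain_hook(pp, example_drain, sc);
}
#endif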

static void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;

	(*pa->pa_free)(pp, v);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
}

static void *
pool_page_alloc_meta(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
}

static void
pool_page_free_meta(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
}

#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *
pool_subpage_alloc(struct pool *pp, int flags)
{
	return pool_get(&psppool, flags);
}

void
pool_subpage_free(struct pool *pp, void *v)
{
	pool_put(&psppool, v);
}

/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_subpage_alloc_nointr(struct pool *pp, int flags)
{

	return (pool_subpage_alloc(pp, flags));
}

void
pool_subpage_free_nointr(struct pool *pp, void *v)
{

	pool_subpage_free(pp, v);
}
#endif /* POOL_SUBPAGE */

void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
}

#if defined(DDB)
static bool
pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
{

	return (uintptr_t)ph->ph_page <= addr &&
	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
}

static bool
pool_in_item(struct pool *pp, void *item, uintptr_t addr)
{

	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
}

static bool
pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
{
	unsigned int i;

	if (pcg == NULL) {
		return false;
	}
	for (i = 0; i < pcg->pcg_avail; i++) {
		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
			return true;
		}
	}
	return false;
}

static bool
pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
{

	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
		pool_item_bitmap_t *bitmap =
		    ph->ph_bitmap + (idx / BITMAP_SIZE);
		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

		return (*bitmap & mask) == 0;
	} else {
		struct pool_item *pi;

		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pool_in_item(pp, pi, addr)) {
				return false;
			}
		}
		return true;
	}
}

void
pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	struct pool *pp;

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		struct pool_item_header *ph;
		uintptr_t item;
		bool allocated = true;
		bool incache = false;
		bool incpucache = false;
		char cpucachestr[32];

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					goto found;
				}
			}
			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					allocated =
					    pool_allocated(pp, ph, addr);
					goto found;
				}
			}
			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					allocated = false;
					goto found;
				}
			}
			continue;
		} else {
			ph = pr_find_pagehead_noalign(pp, (void *)addr);
			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
				continue;
			}
			allocated = pool_allocated(pp, ph, addr);
		}
found:
		if (allocated && pp->pr_cache) {
			pool_cache_t pc = pp->pr_cache;
			struct pool_cache_group *pcg;
			int i;

			for (pcg = pc->pc_fullgroups; pcg != NULL;
			    pcg = pcg->pcg_next) {
				if (pool_in_cg(pp, pcg, addr)) {
					incache = true;
					goto print;
				}
			}
			for (i = 0; i < MAXCPUS; i++) {
				pool_cache_cpu_t *cc;

				if ((cc = pc->pc_cpus[i]) == NULL) {
					continue;
				}
				if (pool_in_cg(pp, cc->cc_current, addr) ||
				    pool_in_cg(pp, cc->cc_previous, addr)) {
					struct cpu_info *ci =
					    cpu_lookup(i);

					incpucache = true;
					snprintf(cpucachestr,
					    sizeof(cpucachestr),
					    "cached by CPU %u",
					    ci->ci_index);
					goto print;
				}
			}
		}
print:
		item = (uintptr_t)ph->ph_page + ph->ph_off;
		item = item + rounddown(addr - item, pp->pr_size);
		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
		    (void *)addr, item, (size_t)(addr - item),
		    pp->pr_wchan,
		    incpucache ? cpucachestr :
		    incache ? "cached" : allocated ? "allocated" : "free");
	}
}
#endif /* defined(DDB) */