/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#ifndef	_SYS_KMEM_IMPL_H
#define	_SYS_KMEM_IMPL_H

#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/kstat.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/avl.h>
#include <sys/list.h>

#ifdef	__cplusplus
extern "C" {
#endif
/*
 * kernel memory allocator: implementation-private data structures
 *
 * Lock order:
 * 1. cache_lock
 * 2. cc_lock in order by CPU ID
 * 3. cache_depot_lock
 *
 * Do not call kmem_cache_alloc() or taskq_dispatch() while holding any of the
 * above locks.
 */
#define	KMF_AUDIT	0x00000001	/* transaction auditing */
#define	KMF_DEADBEEF	0x00000002	/* deadbeef checking */
#define	KMF_REDZONE	0x00000004	/* redzone checking */
#define	KMF_CONTENTS	0x00000008	/* freed-buffer content logging */
#define	KMF_STICKY	0x00000010	/* if set, override /etc/system */
#define	KMF_NOMAGAZINE	0x00000020	/* disable per-cpu magazines */
#define	KMF_FIREWALL	0x00000040	/* put all bufs before unmapped pages */
#define	KMF_LITE	0x00000100	/* lightweight debugging */

#define	KMF_HASH	0x00000200	/* cache has hash table */
#define	KMF_RANDOMIZE	0x00000400	/* randomize other kmem_flags */

#define	KMF_BUFTAG	(KMF_DEADBEEF | KMF_REDZONE)
#define	KMF_TOUCH	(KMF_BUFTAG | KMF_LITE | KMF_CONTENTS)
#define	KMF_RANDOM	(KMF_TOUCH | KMF_AUDIT | KMF_NOMAGAZINE)
#define	KMF_DEBUG	(KMF_RANDOM | KMF_FIREWALL)
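/*
 * Example (illustrative note, not from this header): the debugging flags
 * above are conventionally requested at boot via /etc/system, e.g.
 *
 *	set kmem_flags=0xf
 *
 * which enables KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS
 * for eligible caches.
 */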
#define	KMEM_STACK_DEPTH	15

#define	KMEM_FREE_PATTERN		0xdeadbeefdeadbeefULL
#define	KMEM_UNINITIALIZED_PATTERN	0xbaddcafebaddcafeULL
#define	KMEM_REDZONE_PATTERN		0xfeedfacefeedfaceULL
#define	KMEM_REDZONE_BYTE		0xbb
/*
 * Redzone size encodings for kmem_alloc() / kmem_free().  We encode the
 * allocation size, rather than storing it directly, so that kmem_free()
 * can distinguish frees of the wrong size from redzone violations.
 *
 * A size of zero is never valid.
 */
#define	KMEM_SIZE_ENCODE(x)	(251 * (x) + 1)
#define	KMEM_SIZE_DECODE(x)	((x) / 251)
#define	KMEM_SIZE_VALID(x)	((x) % 251 == 1 && (x) != 1)
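/*
 * Worked example (illustrative): KMEM_SIZE_ENCODE(64) == 251 * 64 + 1 ==
 * 16065.  KMEM_SIZE_VALID(16065) holds because 16065 % 251 == 1 and the
 * value is not 1 (which would encode a size of zero), and
 * KMEM_SIZE_DECODE(16065) == 64 recovers the size.  A redzone overwrite
 * that clobbers the encoded word almost certainly breaks the modulus
 * property, while a free of the wrong size decodes cleanly to a size that
 * disagrees with the cache -- so the two failures are distinguishable.
 */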
#define	KMEM_ALIGN		8	/* min guaranteed alignment */
#define	KMEM_ALIGN_SHIFT	3	/* log2(KMEM_ALIGN) */
#define	KMEM_VOID_FRACTION	8	/* never waste more than 1/8 of slab */

#define	KMEM_SLAB_IS_PARTIAL(sp)	\
	((sp)->slab_refcnt > 0 && (sp)->slab_refcnt < (sp)->slab_chunks)
#define	KMEM_SLAB_IS_ALL_USED(sp)	\
	((sp)->slab_refcnt == (sp)->slab_chunks)
/*
 * The bufctl (buffer control) structure keeps some minimal information
 * about each buffer: its address, its slab, and its current linkage,
 * which is either on the slab's freelist (if the buffer is free), or
 * on the cache's buf-to-bufctl hash table (if the buffer is allocated).
 * In the case of non-hashed, or "raw", caches (the common case), only
 * the freelist linkage is necessary: the buffer address is at a fixed
 * offset from the bufctl address, and the slab is at the end of the page.
 *
 * NOTE: bc_next must be the first field; raw buffers have linkage only.
 */
typedef struct kmem_bufctl {
	struct kmem_bufctl	*bc_next;	/* next bufctl struct */
	void			*bc_addr;	/* address of buffer */
	struct kmem_slab	*bc_slab;	/* controlling slab */
} kmem_bufctl_t;
/*
 * The KMF_AUDIT version of the bufctl structure.  The beginning of this
 * structure must be identical to the normal bufctl structure so that
 * pointers are interchangeable.
 */
typedef struct kmem_bufctl_audit {
	struct kmem_bufctl	*bc_next;	/* next bufctl struct */
	void			*bc_addr;	/* address of buffer */
	struct kmem_slab	*bc_slab;	/* controlling slab */
	kmem_cache_t		*bc_cache;	/* controlling cache */
	hrtime_t		bc_timestamp;	/* transaction time */
	kthread_t		*bc_thread;	/* thread doing transaction */
	struct kmem_bufctl	*bc_lastlog;	/* last log entry */
	void			*bc_contents;	/* contents at last free */
	int			bc_depth;	/* stack depth */
	pc_t			bc_stack[KMEM_STACK_DEPTH]; /* pc stack */
} kmem_bufctl_audit_t;
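/*
 * Sketch (illustrative, not from this header): with KMF_AUDIT set, the
 * allocator fills in the audit bufctl at transaction time roughly as
 *
 *	bcp->bc_timestamp = gethrtime();
 *	bcp->bc_thread = curthread;
 *	bcp->bc_depth = getpcstack(bcp->bc_stack, KMEM_STACK_DEPTH);
 *
 * so that mdb's ::bufctl_audit can later report who allocated or freed
 * a given buffer, and when.
 */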
/*
 * A kmem_buftag structure is appended to each buffer whenever any of the
 * KMF_BUFTAG flags (KMF_DEADBEEF, KMF_REDZONE, KMF_VERIFY) are set.
 */
typedef struct kmem_buftag {
	uint64_t	bt_redzone;	/* 64-bit redzone pattern */
	kmem_bufctl_t	*bt_bufctl;	/* bufctl */
	intptr_t	bt_bxstat;	/* bufctl ^ (alloc/free) */
} kmem_buftag_t;
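/*
 * Sketch (illustrative, not from this header): bt_bxstat encodes the
 * buffer's state.  After an allocation, bt_bxstat ==
 * (intptr_t)bt_bufctl ^ KMEM_BUFTAG_ALLOC (defined below); a free
 * rewrites it with KMEM_BUFTAG_FREE.  Freeing a buffer whose bxstat
 * already carries the FREE pattern is therefore caught as a double free.
 */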
/*
 * A variant of the kmem_buftag structure used for KMF_LITE caches.
 * Previous callers are stored in reverse chronological order (i.e. most
 * recent first).
 */
typedef struct kmem_buftag_lite {
	kmem_buftag_t	bt_buftag;	/* a normal buftag */
	pc_t		bt_history[1];	/* zero or more callers */
} kmem_buftag_lite_t;
#define	KMEM_BUFTAG_LITE_SIZE(f)	\
	(offsetof(kmem_buftag_lite_t, bt_history[f]))

#define	KMEM_BUFTAG(cp, buf)		\
	((kmem_buftag_t *)((char *)(buf) + (cp)->cache_buftag))

#define	KMEM_BUFCTL(cp, buf)		\
	((kmem_bufctl_t *)((char *)(buf) + (cp)->cache_bufctl))

#define	KMEM_BUF(cp, bcp)		\
	((void *)((char *)(bcp) - (cp)->cache_bufctl))

#define	KMEM_SLAB(cp, buf)		\
	((kmem_slab_t *)P2END((uintptr_t)(buf), (cp)->cache_slabsize) - 1)
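/*
 * Worked example (illustrative): for a raw cache with 4K slabs
 * (cache_slabsize == 0x1000), a buffer at 0x12345a80 gives
 * P2END(0x12345a80, 0x1000) == 0x12346000, so KMEM_SLAB() returns
 * (kmem_slab_t *)0x12346000 - 1: the kmem_slab_t sitting at the very
 * end of the slab's page.
 */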
174 * The "CPU" macro loads a cpu_t that refers to the cpu that the current
175 * thread is running on at the time the macro is executed. A context switch
176 * may occur immediately after loading this data structure, leaving this
177 * thread pointing at the cpu_t for the previous cpu. This is not a problem;
178 * we'd just end up checking the previous cpu's per-cpu cache, and then check
179 * the other layers of the kmem cache if need be.
181 * It's not even a problem if the old cpu gets DR'ed out during the context
182 * switch. The cpu-remove DR operation bzero()s the cpu_t, but doesn't free
183 * it. So the cpu_t's cpu_cache_offset would read as 0, causing us to use
184 * cpu 0's per-cpu cache.
186 * So, there is no need to disable kernel preemption while using the CPU macro
187 * below since if we have been context switched, there will not be any
188 * correctness problem, just a momentary use of a different per-cpu cache.
191 #define KMEM_CPU_CACHE(cp) \
192 (kmem_cpu_cache_t *)((char *)(&cp->cache_cpu) + CPU->cpu_cache_offset)
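/*
 * Usage sketch (illustrative, not from this header):
 *
 *	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
 *	mutex_enter(&ccp->cc_lock);
 *	...
 *
 * If the thread migrates between evaluating CPU and acquiring cc_lock,
 * it simply operates on another cpu's cache under that cache's lock,
 * which is always safe.
 */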
#define	KMEM_MAGAZINE_VALID(cp, mp)	\
	(((kmem_slab_t *)P2END((uintptr_t)(mp), PAGESIZE) - 1)->slab_cache == \
	(cp)->cache_magtype->mt_cache)

#define	KMEM_SLAB_OFFSET(sp, buf)	\
	((size_t)((uintptr_t)(buf) - (uintptr_t)((sp)->slab_base)))

#define	KMEM_SLAB_MEMBER(sp, buf)	\
	(KMEM_SLAB_OFFSET(sp, buf) < (sp)->slab_cache->cache_slabsize)

#define	KMEM_BUFTAG_ALLOC	0xa110c8edUL
#define	KMEM_BUFTAG_FREE	0xf4eef4eeUL

/* slab_later_count thresholds */
#define	KMEM_DISBELIEF		3

/* slab_flags */
#define	KMEM_SLAB_NOMOVE	0x1
#define	KMEM_SLAB_MOVE_PENDING	0x2
typedef struct kmem_slab {
	struct kmem_cache	*slab_cache;	/* controlling cache */
	void			*slab_base;	/* base of allocated memory */
	avl_node_t		slab_link;	/* slab linkage */
	struct kmem_bufctl	*slab_head;	/* first free buffer */
	long			slab_refcnt;	/* outstanding allocations */
	long			slab_chunks;	/* chunks (bufs) in this slab */
	uint32_t		slab_stuck_offset; /* unmoved buffer offset */
	uint16_t		slab_later_count; /* cf KMEM_CBRC_LATER */
	uint16_t		slab_flags;	/* bits to mark the slab */
} kmem_slab_t;
#define	KMEM_HASH_INITIAL	64

#define	KMEM_HASH(cp, buf)	\
	((cp)->cache_hash_table +	\
	(((uintptr_t)(buf) >> (cp)->cache_hash_shift) & (cp)->cache_hash_mask))
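/*
 * Lookup sketch (illustrative, not from this header): a hashed cache
 * finds the bufctl for an allocated buffer by walking the chain --
 *
 *	kmem_bufctl_t *bcp;
 *	for (bcp = *KMEM_HASH(cp, buf); bcp != NULL; bcp = bcp->bc_next)
 *		if (bcp->bc_addr == buf)
 *			break;	// found the controlling bufctl
 *
 * cache_lookup_depth (below) tracks how long these chains get.
 */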
typedef struct kmem_magazine {
	void	*mag_next;		/* next magazine on list */
	void	*mag_round[1];		/* one or more rounds */
} kmem_magazine_t;

/*
 * The magazine types for fast per-cpu allocation
 */
typedef struct kmem_magtype {
	int		mt_magsize;	/* magazine size (number of rounds) */
	int		mt_align;	/* magazine alignment */
	size_t		mt_minbuf;	/* all smaller buffers qualify */
	size_t		mt_maxbuf;	/* no larger buffers qualify */
	kmem_cache_t	*mt_cache;	/* magazine cache */
} kmem_magtype_t;
#define	KMEM_CPU_CACHE_SIZE	64	/* must be power of 2 */
#define	KMEM_CPU_PAD		(KMEM_CPU_CACHE_SIZE - sizeof (kmutex_t) - \
	2 * sizeof (uint64_t) - 2 * sizeof (void *) - 4 * sizeof (int))
#define	KMEM_CACHE_SIZE(ncpus)	\
	((size_t)(&((kmem_cache_t *)0)->cache_cpu[ncpus]))

/* Offset from kmem_cache->cache_cpu for per cpu caches */
#define	KMEM_CPU_CACHE_OFFSET(cpuid)	\
	((size_t)(&((kmem_cache_t *)0)->cache_cpu[cpuid]) -	\
	(size_t)(&((kmem_cache_t *)0)->cache_cpu))
typedef struct kmem_cpu_cache {
	kmutex_t	cc_lock;	/* protects this cpu's local cache */
	uint64_t	cc_alloc;	/* allocations from this cpu */
	uint64_t	cc_free;	/* frees to this cpu */
	kmem_magazine_t	*cc_loaded;	/* the currently loaded magazine */
	kmem_magazine_t	*cc_ploaded;	/* the previously loaded magazine */
	int		cc_rounds;	/* number of objects in loaded mag */
	int		cc_prounds;	/* number of objects in previous mag */
	int		cc_magsize;	/* number of rounds in a full mag */
	int		cc_flags;	/* CPU-local copy of cache_flags */
	char		cc_pad[KMEM_CPU_PAD]; /* for nice alignment */
} kmem_cpu_cache_t;
/*
 * The magazine lists used in the depot.
 */
typedef struct kmem_maglist {
	kmem_magazine_t	*ml_list;	/* magazine list */
	long		ml_total;	/* number of magazines */
	long		ml_min;		/* min since last update */
	long		ml_reaplimit;	/* max reapable magazines */
	uint64_t	ml_alloc;	/* allocations from this list */
} kmem_maglist_t;
typedef struct kmem_defrag {
	/*
	 * Statistics
	 */
	uint64_t kmd_callbacks;		/* move callbacks */
	uint64_t kmd_yes;		/* KMEM_CBRC_YES responses */
	uint64_t kmd_no;		/* NO responses */
	uint64_t kmd_later;		/* LATER responses */
	uint64_t kmd_dont_need;		/* DONT_NEED responses */
	uint64_t kmd_dont_know;		/* DONT_KNOW responses */
	uint64_t kmd_hunt_found;	/* DONT_KNOW: # found in mag */

	/*
	 * Consolidator fields
	 */
	avl_tree_t kmd_moves_pending;	/* buffer moves pending */
	list_t kmd_deadlist;		/* deferred slab frees */
	size_t kmd_deadcount;		/* # of slabs in kmd_deadlist */
	uint8_t kmd_reclaim_numer;	/* slab usage threshold */
	uint8_t kmd_pad1;		/* compiler padding */
	size_t kmd_slabs_sought;	/* reclaimable slabs sought */
	size_t kmd_slabs_found;		/* reclaimable slabs found */
	size_t kmd_scans;		/* nth scan interval counter */

	/*
	 * Fields used to ASSERT that the client does not kmem_cache_free()
	 * objects passed to the move callback.
	 */
	void *kmd_from_buf;		/* object to move */
	void *kmd_to_buf;		/* move destination */
	kthread_t *kmd_thread;		/* thread calling move */
} kmem_defrag_t;
#define	KMEM_CACHE_NAMELEN	31

struct kmem_cache {
	/*
	 * Statistics
	 */
	uint64_t	cache_slab_create;	/* slab creates */
	uint64_t	cache_slab_destroy;	/* slab destroys */
	uint64_t	cache_slab_alloc;	/* slab layer allocations */
	uint64_t	cache_slab_free;	/* slab layer frees */
	uint64_t	cache_alloc_fail;	/* total failed allocations */
	uint64_t	cache_buftotal;		/* total buffers */
	uint64_t	cache_bufmax;		/* max buffers ever */
	uint64_t	cache_bufslab;		/* buffers free in slab layer */
	uint64_t	cache_rescale;		/* # of hash table rescales */
	uint64_t	cache_lookup_depth;	/* hash lookup depth */
	uint64_t	cache_depot_contention;	/* mutex contention count */
	uint64_t	cache_depot_contention_prev; /* previous snapshot */

	/*
	 * Cache properties
	 */
	char		cache_name[KMEM_CACHE_NAMELEN + 1];
	size_t		cache_bufsize;		/* object size */
	size_t		cache_align;		/* object alignment */
	int		(*cache_constructor)(void *, void *, int);
	void		(*cache_destructor)(void *, void *);
	void		(*cache_reclaim)(void *);
	kmem_cbrc_t	(*cache_move)(void *, void *, size_t, void *);
	void		*cache_private;		/* opaque arg to callbacks */
	vmem_t		*cache_arena;		/* vmem source for slabs */
	int		cache_cflags;		/* cache creation flags */
	int		cache_flags;		/* various cache state info */
	uint32_t	cache_mtbf;		/* induced alloc failure rate */
	uint32_t	cache_pad1;		/* compiler padding */
	kstat_t		*cache_kstat;		/* exported statistics */
	list_node_t	cache_link;		/* cache linkage */

	/*
	 * Slab layer
	 */
	kmutex_t	cache_lock;		/* protects slab layer */
	size_t		cache_chunksize;	/* buf + alignment [+ debug] */
	size_t		cache_slabsize;		/* size of a slab */
	size_t		cache_maxchunks;	/* max buffers per slab */
	size_t		cache_bufctl;		/* buf-to-bufctl distance */
	size_t		cache_buftag;		/* buf-to-buftag distance */
	size_t		cache_verify;		/* bytes to verify */
	size_t		cache_contents;		/* bytes of saved content */
	size_t		cache_color;		/* next slab color */
	size_t		cache_mincolor;		/* minimum slab color */
	size_t		cache_maxcolor;		/* maximum slab color */
	size_t		cache_hash_shift;	/* get to interesting bits */
	size_t		cache_hash_mask;	/* hash table mask */
	list_t		cache_complete_slabs;	/* completely allocated slabs */
	size_t		cache_complete_slab_count;
	avl_tree_t	cache_partial_slabs;	/* partial slab freelist */
	size_t		cache_partial_binshift;	/* for AVL sort bins */
	kmem_cache_t	*cache_bufctl_cache;	/* source of bufctls */
	kmem_bufctl_t	**cache_hash_table;	/* hash table base */
	kmem_defrag_t	*cache_defrag;		/* slab consolidator fields */

	/*
	 * Depot layer
	 */
	kmutex_t	cache_depot_lock;	/* protects depot */
	kmem_magtype_t	*cache_magtype;		/* magazine type */
	kmem_maglist_t	cache_full;		/* full magazines */
	kmem_maglist_t	cache_empty;		/* empty magazines */

	/*
	 * Per-CPU layer
	 */
	kmem_cpu_cache_t cache_cpu[1];		/* max_ncpus actual elements */
};
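/*
 * Layering sketch (illustrative, not from this header): an allocation
 * first tries the per-cpu layer (cc_loaded/cc_rounds under cc_lock),
 * then exchanges magazines with the depot (cache_full/cache_empty under
 * cache_depot_lock), and only falls through to the slab layer (under
 * cache_lock) when no full magazine is available -- matching the lock
 * order documented at the top of this file.
 */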
typedef struct kmem_cpu_log_header {
	kmutex_t	clh_lock;
	char		*clh_current;
	size_t		clh_avail;
	int		clh_chunk;
	int		clh_hits;
	char		clh_pad[64 - sizeof (kmutex_t) - sizeof (char *) -
			    sizeof (size_t) - 2 * sizeof (int)];
} kmem_cpu_log_header_t;
typedef struct kmem_log_header {
	kmutex_t	lh_lock;
	char		*lh_base;
	int		*lh_free;
	size_t		lh_chunksize;
	size_t		lh_nchunks;
	long		lh_head;
	long		lh_tail;
	int		lh_hits;
	kmem_cpu_log_header_t lh_cpu[1];	/* ncpus actually allocated */
} kmem_log_header_t;
/* kmem_move kmm_flags */
#define	KMM_DESPERATE	0x1
#define	KMM_NOTIFY	0x2

typedef struct kmem_move {
	kmem_slab_t	*kmm_from_slab;
	void		*kmm_from_buf;
	void		*kmm_to_buf;
	avl_node_t	kmm_entry;
} kmem_move_t;

/*
 * In order to consolidate partial slabs, it must be possible for the cache to
 * have partial slabs.
 */
#define	KMEM_IS_MOVABLE(cp)	\
	(((cp)->cache_chunksize * 2) <= (cp)->cache_slabsize)
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_KMEM_IMPL_H */