 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */
/*
 * Big Theory Statement for the virtual memory allocator.
 *
 * For a more complete description of the main ideas, see:
 *
 *	Jeff Bonwick and Jonathan Adams,
 *
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as http://www.usenix.org/event/usenix01/bonwick.html
 *
 * Section 1, below, is also the primary contents of vmem(9).  If for some
 * reason you are updating this comment, you will also wish to update the
 * manual page.
 *
 * We divide the kernel address space into a number of logically distinct
 * pieces, or *arenas*: text, data, heap, stack, and so on.  Within these
 * arenas we often subdivide further; for example, we use heap addresses
 * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
 * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
 * The kernel address space, therefore, is most accurately described as
 * a tree of arenas in which each node of the tree *imports* some subset
 * of its parent.  The virtual memory allocator manages these arenas and
 * supports their natural hierarchical structure.
 *
 * An arena is nothing more than a set of integers.  These integers most
 * commonly represent virtual addresses, but in fact they can represent
 * anything at all.  For example, we could use an arena containing the
 * integers minpid through maxpid to allocate process IDs.  vmem_create()
 * and vmem_destroy() create and destroy vmem arenas.  In order to
 * differentiate between arenas used for addresses and arenas used for
 * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create().  This
 * prevents identifier exhaustion from being diagnosed as general memory
 * pressure.
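 *
 * Purely as an illustrative sketch (this is not code from the kernel
 * proper, and the variable names are hypothetical), an identifier arena
 * covering [minpid, maxpid] could be created and used like this:
 *
 *	vmem_t *pid_arena = vmem_create("pid", (void *)(uintptr_t)minpid,
 *	    maxpid - minpid + 1, 1, NULL, NULL, NULL, 0,
 *	    VM_SLEEP | VMC_IDENTIFIER);
 *
 *	pid_t pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1, VM_SLEEP);
 *	vmem_free(pid_arena, (void *)(uintptr_t)pid, 1);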
 *
 * We represent the integers in an arena as a collection of *spans*, or
 * contiguous ranges of integers.  For example, the kernel heap consists
 * of just one span: [kernelheap, ekernelheap).  Spans can be added to an
 * arena in two ways: explicitly, by vmem_add(), or implicitly, by
 * importing, as described in Section 1.5 below.
 *
 * Spans are subdivided into *segments*, each of which is either allocated
 * or free.  A segment, like a span, is a contiguous range of integers.
 * Each allocated segment [addr, addr + size) represents exactly one
 * vmem_alloc(size) that returned addr.  Free segments represent the space
 * between allocated segments.  If two free segments are adjacent, we
 * coalesce them into one larger segment; that is, if segments [a, b) and
 * [b, c) are both free, we merge them into a single segment [a, c).
 * The segments within a span are linked together in increasing-address order
 * so we can easily determine whether coalescing is possible.
 *
 * Segments never cross span boundaries.  When all segments within
 * an imported span become free, we return the span to its source.
 *
 * As mentioned in the overview, some arenas are logical subsets of
 * other arenas.  For example, kmem_va_arena (a virtual address cache
 * that satisfies most kmem_slab_create() requests) is just a subset
 * of heap_arena (the kernel heap) that provides caching for the most
 * common slab sizes.  When kmem_va_arena runs out of virtual memory,
 * it *imports* more from the heap; we say that heap_arena is the
 * *vmem source* for kmem_va_arena.  vmem_create() allows you to
 * specify any existing vmem arena as the source for your new arena.
 * Topologically, since every arena is a child of at most one source,
 * the set of all arenas forms a collection of trees.
 *
 * 1.6 Constrained Allocations
 * ---------------------------
 * Some vmem clients are quite picky about the kind of address they want.
 * For example, the DVMA code may need an address that is at a particular
 * phase with respect to some alignment (to get good cache coloring), or
 * that lies within certain limits (the addressable range of a device),
 * or that doesn't cross some boundary (a DMA counter restriction) --
 * or all of the above.  vmem_xalloc() allows the client to specify any
 * or all of these constraints.
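 *
 * As a sketch only (the arena name is illustrative, not an arena that
 * necessarily exists), an 8K allocation aligned to 64K, kept below 16MB,
 * and not crossing a 1MB boundary could be requested as:
 *
 *	addr = vmem_xalloc(dvma_arena, 8192, 65536, 0, 0x100000,
 *	    NULL, (void *)0x1000000, VM_SLEEP);
 *
 * and must later be released with vmem_xfree(dvma_arena, addr, 8192),
 * since constrained allocations bypass the quantum caches.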
 *
 * 1.7 The Vmem Quantum
 * --------------------
 * Every arena has a notion of 'quantum', specified at vmem_create() time,
 * that defines the arena's minimum unit of currency.  Most commonly the
 * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
 * All vmem allocations are guaranteed to be quantum-aligned.
 *
 * 1.8 Quantum Caching
 * -------------------
 * A vmem arena may be so hot (frequently used) that the scalability of vmem
 * allocation is a significant concern.  We address this by allowing the most
 * common allocation sizes to be serviced by the kernel memory allocator,
 * which provides low-latency per-cpu caching.  The qcache_max argument to
 * vmem_create() specifies the largest allocation size to cache.
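 *
 * For example (a sketch; the arena name is illustrative), an arena whose
 * 1- to 8-page allocations should be served by per-cpu kmem caches could
 * be created as:
 *
 *	arena = vmem_create("example_va", NULL, 0, PAGESIZE,
 *	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);
 *
 * Allocations of up to 8 * PAGESIZE are then satisfied by
 * kmem_cache_alloc() on the corresponding quantum cache; larger requests
 * fall through to the normal vmem path.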
 *
 * 1.9 Relationship to Kernel Memory Allocator
 * -------------------------------------------
 * Every kmem cache has a vmem arena as its slab supplier.  The kernel memory
 * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
 *
 * 2.1 Segment lists and markers
 * -----------------------------
 * The segment structure (vmem_seg_t) contains two doubly-linked lists.
 *
 * The arena list (vs_anext/vs_aprev) links all segments in the arena.
 * In addition to the allocated and free segments, the arena contains
 * special marker segments at span boundaries.  Span markers simplify
 * coalescing and importing logic by making it easy to tell both when
 * we're at a span boundary (so we don't coalesce across it), and when
 * a span is completely free (its neighbors will both be span markers).
 *
 * Imported spans will have vs_import set.
 *
 * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
 * (1) for allocated segments, vs_knext is the hash chain linkage;
 * (2) for free segments, vs_knext is the freelist linkage;
 * (3) for span marker segments, vs_knext is the next span marker.
 *
 * 2.2 Allocation hashing
 * ----------------------
 * We maintain a hash table of all allocated segments, hashed by address.
 * This allows vmem_free() to discover the target segment in constant time.
 * vmem_update() periodically resizes hash tables to keep hash chains short.
 *
 * 2.3 Freelist management
 * -----------------------
 * We maintain power-of-2 freelists for free segments, i.e. free segments
 * of size >= 2^n reside in vmp->vm_freelist[n].  To ensure constant-time
 * allocation, vmem_xalloc() looks not in the first freelist that *might*
 * satisfy the allocation, but in the first freelist that *definitely*
 * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
 * freelists are empty).  For example, a 1000-byte allocation will be
 * satisfied not from the 512..1023-byte freelist, whose members *might*
 * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
 * the first member of which will *definitely* satisfy the allocation.
 * This ensures that vmem_xalloc() works in constant time.
 *
 * We maintain a bit map to determine quickly which freelists are non-empty.
 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
 *
 * The different freelists are linked together into one large freelist,
 * with the freelist heads serving as markers.  Freelist markers simplify
 * the maintenance of vm_freemap by making it easy to tell when we're taking
 * the last member of a freelist (both of its neighbors will be markers).
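 *
 * Concretely, the instant-fit freelist choice reduces to a couple of bit
 * operations on vm_freemap; this is the same logic used by vmem_alloc()
 * and vmem_canalloc() below:
 *
 *	if (ISP2(size))
 *		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 *	else if ((hb = highbit(size)) < VMEM_FREELISTS)
 *		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
 *
 * For a power-of-2 size the freelist starting at that size may be used
 * directly; otherwise we round up to the next power of 2 so that any
 * member of the chosen freelist is guaranteed to be large enough.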
 *
 * For simplicity, all arena state is protected by a per-arena lock.
 * For very hot arenas, use quantum caching for scalability.
 *
 * 2.5 Vmem Population
 * -------------------
 * Any internal vmem routine that might need to allocate new segment
 * structures must prepare in advance by calling vmem_populate(), which
 * will preallocate enough vmem_seg_t's to get it through the entire
 * operation without dropping the arena lock.
 *
 * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
 * Since virtual addresses cannot be scribbled on, there is no equivalent
 * in vmem to redzone checking, deadbeef, or other kmem debugging features.
 * Moreover, we do not audit frees because segment coalescing destroys the
 * association between an address and its segment structure.  Auditing is
 * thus intended primarily to keep track of who's consuming the arena.
 * Debugging support could certainly be extended in the future if it proves
 * necessary, but we do so much live checking via the allocation hash table
 * that even non-DEBUG systems get quite a bit of sanity checking already.
 */
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/panic.h>
#define	VMEM_INITIAL		10	/* early vmem arenas */
#define	VMEM_SEG_INITIAL	200	/* early segments */

/*
 * Adding a new span to an arena requires two segment structures: one to
 * represent the span, and one to represent the free segment it contains.
 */
#define	VMEM_SEGS_PER_SPAN_CREATE	2

/*
 * Allocating a piece of an existing segment requires 0-2 segment structures
 * depending on how much of the segment we're allocating.
 *
 * To allocate the entire segment, no new segment structures are needed; we
 * simply move the existing segment structure from the freelist to the
 * allocation hash table.
 *
 * To allocate a piece from the left or right end of the segment, we must
 * split the segment into two pieces (allocated part and remainder), so we
 * need one new segment structure to represent the remainder.
 *
 * To allocate from the middle of a segment, we need two new segment
 * structures to represent the remainders on either side of the allocated
 * part.
 */
#define	VMEM_SEGS_PER_EXACT_ALLOC	0
#define	VMEM_SEGS_PER_LEFT_ALLOC	1
#define	VMEM_SEGS_PER_RIGHT_ALLOC	1
#define	VMEM_SEGS_PER_MIDDLE_ALLOC	2

/*
 * vmem_populate() preallocates segment structures for vmem to do its work.
 * It must preallocate enough for the worst case, which is when we must import
 * a new span and then allocate from the middle of it.
 */
#define	VMEM_SEGS_PER_ALLOC_MAX		\
	(VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
/*
 * The segment structures themselves are allocated from vmem_seg_arena, so
 * we have a recursion problem when vmem_seg_arena needs to populate itself.
 * We address this by working out the maximum number of segment structures
 * this act will require, and multiplying by the maximum number of threads
 * that we'll allow to do it simultaneously.
 *
 * The worst-case segment consumption to populate vmem_seg_arena is as
 * follows (depicted as a stack trace to indicate why events are occurring):
 *
 * (In order to lower the fragmentation in the heap_arena, we specify a
 * minimum import size for the vmem_metadata_arena which is the same size
 * as the kmem_va quantum cache allocations.  This causes the worst-case
 * allocation from the vmem_metadata_arena to be 3 segments.)
 *
 * vmem_alloc(vmem_seg_arena)		-> 2 segs (span create + exact alloc)
 *  segkmem_alloc(vmem_metadata_arena)
 *   vmem_alloc(vmem_metadata_arena)	-> 3 segs (span create + left alloc)
 *    vmem_alloc(heap_arena)		-> 1 seg (left alloc)
 *    ...
 *      vmem_alloc(hat_memload_arena)	-> 2 segs (span create + exact alloc)
 *       segkmem_alloc(heap_arena)
 *        vmem_alloc(heap_arena)	-> 1 seg (left alloc)
 *        ...
 *        hat_memload()			-> (hat layer won't recurse further)
 *
 * The worst-case consumption for each arena is 3 segment structures.
 * Of course, a 3-seg reserve could easily be blown by multiple threads.
 * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
 * because they're rare).  We cannot allow a non-blocking allocation to get
 * tied up behind a blocking allocation, however, so we use separate locks
 * for VM_SLEEP and VM_NOSLEEP allocations.  Similarly, VM_PUSHPAGE allocations
 * must not block behind ordinary VM_SLEEPs.  In addition, if the system is
 * panicking then we must keep enough resources for panic_thread to do its
 * work.  Thus we have at most four threads trying to allocate from
 * vmem_seg_arena, and each thread consumes at most three segment structures,
 * so we must maintain a 12-seg reserve.
 */
#define	VMEM_POPULATE_RESERVE	12

/*
 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
 * so that it can satisfy the worst-case allocation *and* participate in
 * worst-case allocation from vmem_seg_arena.
 */
#define	VMEM_MINFREE	(VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
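/*
 * With the values above, VMEM_MINFREE works out to 12 + (2 + 2) = 16
 * preallocated vmem_seg_t structures per arena.
 */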
static vmem_t vmem0[VMEM_INITIAL];
static vmem_t *vmem_populator[VMEM_INITIAL];
static uint32_t vmem_id;
static uint32_t vmem_populators;
static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
static vmem_seg_t *vmem_segfree;
static kmutex_t vmem_list_lock;
static kmutex_t vmem_segfree_lock;
static kmutex_t vmem_sleep_lock;
static kmutex_t vmem_nosleep_lock;
static kmutex_t vmem_pushpage_lock;
static kmutex_t vmem_panic_lock;
static vmem_t *vmem_list;
static vmem_t *vmem_metadata_arena;
static vmem_t *vmem_seg_arena;
static vmem_t *vmem_hash_arena;
static vmem_t *vmem_vmem_arena;
static long vmem_update_interval = 15;	/* vmem_update() every 15 seconds */
uint32_t vmem_mtbf;		/* mean time between failures [default: off] */
size_t vmem_seg_size = sizeof (vmem_seg_t);
static vmem_kstat_t vmem_kstat_template = {
	{ "mem_inuse",		KSTAT_DATA_UINT64 },
	{ "mem_import",		KSTAT_DATA_UINT64 },
	{ "mem_total",		KSTAT_DATA_UINT64 },
	{ "vmem_source",	KSTAT_DATA_UINT32 },
	{ "alloc",		KSTAT_DATA_UINT64 },
	{ "free",		KSTAT_DATA_UINT64 },
	{ "wait",		KSTAT_DATA_UINT64 },
	{ "fail",		KSTAT_DATA_UINT64 },
	{ "lookup",		KSTAT_DATA_UINT64 },
	{ "search",		KSTAT_DATA_UINT64 },
	{ "populate_wait",	KSTAT_DATA_UINT64 },
	{ "populate_fail",	KSTAT_DATA_UINT64 },
	{ "contains",		KSTAT_DATA_UINT64 },
	{ "contains_search",	KSTAT_DATA_UINT64 },
};
/*
 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 */
#define	VMEM_INSERT(vprev, vsp, type)					\
{									\
	vmem_seg_t *vnext = (vprev)->vs_##type##next;			\
	(vsp)->vs_##type##next = (vnext);				\
	(vsp)->vs_##type##prev = (vprev);				\
	(vprev)->vs_##type##next = (vsp);				\
	(vnext)->vs_##type##prev = (vsp);				\
}

#define	VMEM_DELETE(vsp, type)						\
{									\
	vmem_seg_t *vprev = (vsp)->vs_##type##prev;			\
	vmem_seg_t *vnext = (vsp)->vs_##type##next;			\
	(vprev)->vs_##type##next = (vnext);				\
	(vnext)->vs_##type##prev = (vprev);				\
}
/*
 * Get a vmem_seg_t from the global segfree list.
 */
static vmem_seg_t *
vmem_getseg_global(void)
{
	vmem_seg_t *vsp;

	mutex_enter(&vmem_segfree_lock);
	if ((vsp = vmem_segfree) != NULL)
		vmem_segfree = vsp->vs_knext;
	mutex_exit(&vmem_segfree_lock);

	return (vsp);
}
/*
 * Put a vmem_seg_t on the global segfree list.
 */
static void
vmem_putseg_global(vmem_seg_t *vsp)
{
	mutex_enter(&vmem_segfree_lock);
	vsp->vs_knext = vmem_segfree;
	vmem_segfree = vsp;
	mutex_exit(&vmem_segfree_lock);
}
/*
 * Get a vmem_seg_t from vmp's segfree list.
 */
static vmem_seg_t *
vmem_getseg(vmem_t *vmp)
{
	vmem_seg_t *vsp;

	ASSERT(vmp->vm_nsegfree > 0);

	vsp = vmp->vm_segfree;
	vmp->vm_segfree = vsp->vs_knext;
	vmp->vm_nsegfree--;

	return (vsp);
}
/*
 * Put a vmem_seg_t on vmp's segfree list.
 */
static void
vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
{
	vsp->vs_knext = vmp->vm_segfree;
	vmp->vm_segfree = vsp;
	vmp->vm_nsegfree++;
}
/*
 * Add vsp to the appropriate freelist.
 */
static void
vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *vprev;

	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);

	vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
	vsp->vs_type = VMEM_FREE;
	vmp->vm_freemap |= VS_SIZE(vprev);
	VMEM_INSERT(vprev, vsp, k);

	cv_broadcast(&vmp->vm_cv);
}
/*
 * Take vsp from the freelist.
 */
static void
vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
	ASSERT(vsp->vs_type == VMEM_FREE);

	if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
		/*
		 * The segments on both sides of 'vsp' are freelist heads,
		 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
		 */
		ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
		vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
	}
	VMEM_DELETE(vsp, k);
}
/*
 * Add vsp to the allocated-segment hash table and update kstats.
 */
static void
vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t **bucket;

	vsp->vs_type = VMEM_ALLOC;
	bucket = VMEM_HASH(vmp, vsp->vs_start);
	vsp->vs_knext = *bucket;
	*bucket = vsp;

	if (vmem_seg_size == sizeof (vmem_seg_t)) {
		vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
		    VMEM_STACK_DEPTH);
		vsp->vs_thread = curthread;
		vsp->vs_timestamp = gethrtime();
	} else {
		vsp->vs_depth = 0;
	}

	vmp->vm_kstat.vk_alloc.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
}
/*
 * Remove vsp from the allocated-segment hash table and update kstats.
 */
static vmem_seg_t *
vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
{
	vmem_seg_t *vsp, **prev_vspp;

	prev_vspp = VMEM_HASH(vmp, addr);
	while ((vsp = *prev_vspp) != NULL) {
		if (vsp->vs_start == addr) {
			*prev_vspp = vsp->vs_knext;
			break;
		}
		vmp->vm_kstat.vk_lookup.value.ui64++;
		prev_vspp = &vsp->vs_knext;
	}

	if (vsp == NULL)
		panic("vmem_hash_delete(%p, %lx, %lu): bad free",
		    (void *)vmp, addr, size);
	if (VS_SIZE(vsp) != size)
		panic("vmem_hash_delete(%p, %lx, %lu): wrong size "
		    "(expect %lu)", (void *)vmp, addr, size, VS_SIZE(vsp));

	vmp->vm_kstat.vk_free.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;

	return (vsp);
}
/*
 * Create a segment spanning the range [start, end) and add it to the arena.
 */
static vmem_seg_t *
vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
{
	vmem_seg_t *newseg = vmem_getseg(vmp);

	newseg->vs_start = start;
	newseg->vs_end = end;
	newseg->vs_type = 0;
	newseg->vs_import = 0;

	VMEM_INSERT(vprev, newseg, a);

	return (newseg);
}
/*
 * Remove segment vsp from the arena.
 */
static void
vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(vsp->vs_type != VMEM_ROTOR);
	VMEM_DELETE(vsp, a);

	vmem_putseg(vmp, vsp);
}
/*
 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 */
static vmem_seg_t *
vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
{
	vmem_seg_t *newseg, *span;
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;

	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((start | end) & (vmp->vm_quantum - 1))
		panic("vmem_span_create(%p, %p, %lu): misaligned",
		    (void *)vmp, vaddr, size);

	span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
	span->vs_type = VMEM_SPAN;
	span->vs_import = import;
	VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);

	newseg = vmem_seg_create(vmp, span, start, end);
	vmem_freelist_insert(vmp, newseg);

	if (import)
		vmp->vm_kstat.vk_mem_import.value.ui64 += size;
	vmp->vm_kstat.vk_mem_total.value.ui64 += size;

	return (newseg);
}
/*
 * Remove span vsp from vmp and update kstats.
 */
static void
vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *span = vsp->vs_aprev;
	size_t size = VS_SIZE(vsp);

	ASSERT(MUTEX_HELD(&vmp->vm_lock));
	ASSERT(span->vs_type == VMEM_SPAN);

	if (span->vs_import)
		vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
	vmp->vm_kstat.vk_mem_total.value.ui64 -= size;

	VMEM_DELETE(span, k);

	vmem_seg_destroy(vmp, vsp);
	vmem_seg_destroy(vmp, span);
}
/*
 * Allocate the subrange [addr, addr + size) from segment vsp.
 * If there are leftovers on either side, place them on the freelist.
 * Returns a pointer to the segment representing [addr, addr + size).
 */
static vmem_seg_t *
vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
{
	uintptr_t vs_start = vsp->vs_start;
	uintptr_t vs_end = vsp->vs_end;
	size_t vs_size = vs_end - vs_start;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	uintptr_t addr_end = addr + realsize;

	ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
	ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
	ASSERT(vsp->vs_type == VMEM_FREE);
	ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
	ASSERT(addr - 1 <= addr_end - 1);

	/*
	 * If we're allocating from the start of the segment, and the
	 * remainder will be on the same freelist, we can save quite
	 * a bit of work.
	 */
	if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		vsp->vs_start = addr_end;
		vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
		vmem_hash_insert(vmp, vsp);
		return (vsp);
	}

	vmem_freelist_delete(vmp, vsp);

	if (vs_end != addr_end)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp, addr_end, vs_end));

	if (vs_start != addr)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));

	vsp->vs_start = addr;
	vsp->vs_end = addr + size;

	vmem_hash_insert(vmp, vsp);
	return (vsp);
}
/*
 * Returns 1 if we are populating, 0 otherwise.
 * Call it if we want to prevent recursion from HAT.
 */
int
vmem_is_populator()
{
	return (mutex_owner(&vmem_sleep_lock) == curthread ||
	    mutex_owner(&vmem_nosleep_lock) == curthread ||
	    mutex_owner(&vmem_pushpage_lock) == curthread ||
	    mutex_owner(&vmem_panic_lock) == curthread);
}
/*
 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
 */
static int
vmem_populate(vmem_t *vmp, int vmflag)
{
	char *p;
	vmem_seg_t *vsp;
	ssize_t nseg;
	size_t size;
	kmutex_t *lp;
	int i;

	while (vmp->vm_nsegfree < VMEM_MINFREE &&
	    (vsp = vmem_getseg_global()) != NULL)
		vmem_putseg(vmp, vsp);

	if (vmp->vm_nsegfree >= VMEM_MINFREE)
		return (1);

	/*
	 * If we're already populating, tap the reserve.
	 */
	if (vmem_is_populator()) {
		ASSERT(vmp->vm_cflags & VMC_POPULATOR);
		return (1);
	}

	mutex_exit(&vmp->vm_lock);

	if (panic_thread == curthread)
		lp = &vmem_panic_lock;
	else if (vmflag & VM_NOSLEEP)
		lp = &vmem_nosleep_lock;
	else if (vmflag & VM_PUSHPAGE)
		lp = &vmem_pushpage_lock;
	else
		lp = &vmem_sleep_lock;

	mutex_enter(lp);

	nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
	size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
	nseg = size / vmem_seg_size;

	/*
	 * The following vmem_alloc() may need to populate vmem_seg_arena
	 * and all the things it imports from.  When doing so, it will tap
	 * each arena's reserve to prevent recursion (see the block comment
	 * above the definition of VMEM_POPULATE_RESERVE).
	 */
	p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
	if (p == NULL) {
		mutex_exit(lp);
		mutex_enter(&vmp->vm_lock);
		vmp->vm_kstat.vk_populate_fail.value.ui64++;
		return (0);
	}

	/*
	 * Restock the arenas that may have been depleted during population.
	 */
	for (i = 0; i < vmem_populators; i++) {
		mutex_enter(&vmem_populator[i]->vm_lock);
		while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
			vmem_putseg(vmem_populator[i],
			    (vmem_seg_t *)(p + --nseg * vmem_seg_size));
		mutex_exit(&vmem_populator[i]->vm_lock);
	}

	mutex_exit(lp);
	mutex_enter(&vmp->vm_lock);

	/*
	 * Now take our own segments.
	 */
	ASSERT(nseg >= VMEM_MINFREE);
	while (vmp->vm_nsegfree < VMEM_MINFREE)
		vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));

	/*
	 * Give the remainder to charity.
	 */
	while (nseg > 0)
		vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));

	return (1);
}
/*
 * Advance a walker from its previous position to 'afterme'.
 * Note: may drop and reacquire vmp->vm_lock.
 */
static void
vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
{
	vmem_seg_t *vprev = walker->vs_aprev;
	vmem_seg_t *vnext = walker->vs_anext;
	vmem_seg_t *vsp = NULL;

	VMEM_DELETE(walker, a);

	if (afterme != NULL)
		VMEM_INSERT(afterme, walker, a);

	/*
	 * The walker segment's presence may have prevented its neighbors
	 * from coalescing.  If so, coalesce them now.
	 */
	if (vprev->vs_type == VMEM_FREE) {
		if (vnext->vs_type == VMEM_FREE) {
			ASSERT(vprev->vs_end == vnext->vs_start);
			vmem_freelist_delete(vmp, vnext);
			vmem_freelist_delete(vmp, vprev);
			vprev->vs_end = vnext->vs_end;
			vmem_freelist_insert(vmp, vprev);
			vmem_seg_destroy(vmp, vnext);
		}
		vsp = vprev;
	} else if (vnext->vs_type == VMEM_FREE) {
		vsp = vnext;
	}

	/*
	 * vsp could represent a complete imported span,
	 * in which case we must return it to the source.
	 */
	if (vsp != NULL && vsp->vs_aprev->vs_import &&
	    vmp->vm_source_free != NULL &&
	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
	    vsp->vs_anext->vs_type == VMEM_SPAN) {
		void *vaddr = (void *)vsp->vs_start;
		size_t size = VS_SIZE(vsp);
		ASSERT(size == VS_SIZE(vsp->vs_aprev));
		vmem_freelist_delete(vmp, vsp);
		vmem_span_destroy(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
		vmp->vm_source_free(vmp->vm_source, vaddr, size);
		mutex_enter(&vmp->vm_lock);
	}
}
/*
 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
 * in an arena, so that we avoid reusing addresses for as long as possible.
 * This helps to catch used-after-freed bugs.  It's also the perfect policy
 * for allocating things like process IDs, where we want to cycle through
 * all values in order.
 */
static void *
vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp, *rotor;
	uintptr_t addr;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	size_t vs_size;

	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
		mutex_exit(&vmp->vm_lock);
		return (NULL);
	}

	/*
	 * The common case is that the segment right after the rotor is free,
	 * and large enough that extracting 'size' bytes won't change which
	 * freelist it's on.  In this case we can avoid a *lot* of work.
	 * Instead of the normal vmem_seg_alloc(), we just advance the start
	 * address of the victim segment.  Instead of moving the rotor, we
	 * create the new segment structure *behind the rotor*, which has
	 * the same effect.  And finally, we know we don't have to coalesce
	 * the rotor's neighbors because the new segment lies between them.
	 */
	rotor = &vmp->vm_rotor;
	vsp = rotor->vs_anext;
	if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
	    P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		addr = vsp->vs_start;
		vsp->vs_start = addr + realsize;
		vmem_hash_insert(vmp,
		    vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
		mutex_exit(&vmp->vm_lock);
		return ((void *)addr);
	}

	/*
	 * Starting at the rotor, look for a segment large enough to
	 * satisfy the allocation.
	 */
	for (;;) {
		vmp->vm_kstat.vk_search.value.ui64++;
		if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
			break;
		vsp = vsp->vs_anext;
		if (vsp == rotor) {
			/*
			 * We've come full circle.  One possibility is that
			 * there's actually enough space, but the rotor itself
			 * is preventing the allocation from succeeding because
			 * it's sitting between two free segments.  Therefore,
			 * we advance the rotor and see if that liberates a
			 * suitable segment.
			 */
			vmem_advance(vmp, rotor, rotor->vs_anext);
			vsp = rotor->vs_aprev;
			if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
				break;
			/*
			 * If there's a lower arena we can import from, or it's
			 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
			 * Otherwise, wait until another thread frees something.
			 */
			if (vmp->vm_source_alloc != NULL ||
			    (vmflag & VM_NOSLEEP)) {
				mutex_exit(&vmp->vm_lock);
				return (vmem_xalloc(vmp, size, vmp->vm_quantum,
				    0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
			}
			vmp->vm_kstat.vk_wait.value.ui64++;
			cv_wait(&vmp->vm_cv, &vmp->vm_lock);
			vsp = rotor->vs_anext;
		}
	}

	/*
	 * We found a segment.  Extract enough space to satisfy the allocation.
	 */
	addr = vsp->vs_start;
	vsp = vmem_seg_alloc(vmp, vsp, addr, size);
	ASSERT(vsp->vs_type == VMEM_ALLOC &&
	    vsp->vs_start == addr && vsp->vs_end == addr + size);

	/*
	 * Advance the rotor to right after the newly-allocated segment.
	 * That's where the next VM_NEXTFIT allocation will begin searching.
	 */
	vmem_advance(vmp, rotor, vsp);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}
/*
 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 * freelist.  If size is not a power-of-2, it can return a false-negative.
 *
 * Used to decide if a newly imported span is superfluous after re-acquiring
 * the arena lock.
 */
static int
vmem_canalloc(vmem_t *vmp, size_t size)
{
	int hb;
	int flist = 0;
	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if (ISP2(size))
		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
	else if ((hb = highbit(size)) < VMEM_FREELISTS)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));

	return (flist);
}
/*
 * Allocate size bytes at offset phase from an align boundary such that the
 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
 * that does not straddle a nocross-aligned boundary.
 */
void *
vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
    size_t nocross, void *minaddr, void *maxaddr, int vmflag)
{
	vmem_seg_t *vsp;
	vmem_seg_t *vbest = NULL;
	uintptr_t addr, taddr, start, end;
	uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
	void *vaddr, *xvaddr = NULL;
	size_t xsize;
	int hb, flist, resv;
	uint32_t mtbf;

	if ((align | phase | nocross) & (vmp->vm_quantum - 1))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters not vm_quantum aligned",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (nocross != 0 &&
	    (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "overconstrained allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (phase >= align || !ISP2(align) || !ISP2(nocross))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters inconsistent or invalid",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	mutex_enter(&vmp->vm_lock);
	for (;;) {
		if (vmp->vm_nsegfree < VMEM_MINFREE &&
		    !vmem_populate(vmp, vmflag))
			break;
do_alloc:
		/*
		 * highbit() returns the highest bit + 1, which is exactly
		 * what we want: we want to search the first freelist whose
		 * members are *definitely* large enough to satisfy our
		 * allocation.  However, there are certain cases in which we
		 * want to look at the next-smallest freelist (which *might*
		 * be able to satisfy the allocation):
		 *
		 * (1)	The size is exactly a power of 2, in which case
		 *	the smaller freelist is always big enough;
		 *
		 * (2)	All other freelists are empty;
		 *
		 * (3)	We're in the highest possible freelist, which is
		 *	always empty (e.g. the 4GB freelist on 32-bit systems);
		 *
		 * (4)	We're doing a best-fit or first-fit allocation.
		 */
		if (ISP2(size)) {
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		} else {
			hb = highbit(size);
			if ((vmp->vm_freemap >> hb) == 0 ||
			    hb == VMEM_FREELISTS ||
			    (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
				hb--;
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
		}

		for (vbest = NULL, vsp = (flist == 0) ? NULL :
		    vmp->vm_freelist[flist - 1].vs_knext;
		    vsp != NULL; vsp = vsp->vs_knext) {
			vmp->vm_kstat.vk_search.value.ui64++;
			if (vsp->vs_start == 0) {
				/*
				 * We're moving up to a larger freelist,
				 * so if we've already found a candidate,
				 * the fit can't possibly get any better.
				 */
				if (vbest != NULL)
					break;
				/*
				 * Find the next non-empty freelist.
				 */
				flist = lowbit(P2ALIGN(vmp->vm_freemap,
				    VS_SIZE(vsp)));
				if (flist-- == 0)
					break;
				vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
				ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
				continue;
			}
			if (vsp->vs_end - 1 < (uintptr_t)minaddr)
				continue;
			if (vsp->vs_start > (uintptr_t)maxaddr - 1)
				continue;
			start = MAX(vsp->vs_start, (uintptr_t)minaddr);
			end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
			taddr = P2PHASEUP(start, align, phase);
			if (P2BOUNDARY(taddr, size, nocross))
				taddr +=
				    P2ROUNDUP(P2NPHASE(taddr, nocross), align);
			if ((taddr - start) + size > end - start ||
			    (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
				continue;
			vbest = vsp;
			addr = taddr;
			if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
				break;
		}
		if (vbest != NULL)
			break;
		ASSERT(xvaddr == NULL);
		if (size == 0)
			panic("vmem_xalloc(): size == 0");
		if (vmp->vm_source_alloc != NULL && nocross == 0 &&
		    minaddr == NULL && maxaddr == NULL) {
			size_t aneeded, asize;
			size_t aquantum = MAX(vmp->vm_quantum,
			    vmp->vm_source->vm_quantum);
			size_t aphase = phase;
			if ((align > aquantum) &&
			    !(vmp->vm_cflags & VMC_XALIGN)) {
				aphase = (P2PHASE(phase, aquantum) != 0) ?
				    align - vmp->vm_quantum : align - aquantum;
				ASSERT(aphase >= phase);
			}
			aneeded = MAX(size + aphase, vmp->vm_min_import);
			asize = P2ROUNDUP(aneeded, aquantum);

			if (asize < size) {
				/*
				 * The rounding induced overflow; return NULL
				 * if we are permitted to fail the allocation
				 * (and explicitly panic if we aren't).
				 */
				if ((vmflag & VM_NOSLEEP) &&
				    !(vmflag & VM_PANIC)) {
					mutex_exit(&vmp->vm_lock);
					return (NULL);
				}

				panic("vmem_xalloc(): size overflow");
			}

			/*
			 * Determine how many segment structures we'll consume.
			 * The calculation must be precise because if we're
			 * here on behalf of vmem_populate(), we are taking
			 * segments from a very limited reserve.
			 */
			if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
				resv = VMEM_SEGS_PER_SPAN_CREATE +
				    VMEM_SEGS_PER_EXACT_ALLOC;
			else if (phase == 0 &&
			    align <= vmp->vm_source->vm_quantum)
				resv = VMEM_SEGS_PER_SPAN_CREATE +
				    VMEM_SEGS_PER_LEFT_ALLOC;
			else
				resv = VMEM_SEGS_PER_ALLOC_MAX;

			ASSERT(vmp->vm_nsegfree >= resv);
			vmp->vm_nsegfree -= resv;	/* reserve our segs */
			mutex_exit(&vmp->vm_lock);
			if (vmp->vm_cflags & VMC_XALLOC) {
				size_t oasize = asize;
				vaddr = ((vmem_ximport_t *)
				    vmp->vm_source_alloc)(vmp->vm_source,
				    &asize, align, vmflag & VM_KMFLAGS);
				ASSERT(asize >= oasize);
				ASSERT(P2PHASE(asize,
				    vmp->vm_source->vm_quantum) == 0);
				ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
				    IS_P2ALIGNED(vaddr, align));
			} else {
				vaddr = vmp->vm_source_alloc(vmp->vm_source,
				    asize, vmflag & VM_KMFLAGS);
			}
			mutex_enter(&vmp->vm_lock);
			vmp->vm_nsegfree += resv;	/* claim reservation */
			aneeded = size + align - vmp->vm_quantum;
			aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
			if (vaddr != NULL) {
				/*
				 * Since we dropped the vmem lock while
				 * calling the import function, other
				 * threads could have imported space
				 * and made our import unnecessary.  In
				 * order to save space, we return
				 * excess imports immediately.
				 */
				if (asize > aneeded &&
				    vmp->vm_source_free != NULL &&
				    vmem_canalloc(vmp, aneeded)) {
					ASSERT(resv >=
					    VMEM_SEGS_PER_MIDDLE_ALLOC);
					xvaddr = vaddr;
					xsize = asize;
					goto do_alloc;
				}
				vbest = vmem_span_create(vmp, vaddr, asize, 1);
				addr = P2PHASEUP(vbest->vs_start, align, phase);
				break;
			} else if (vmem_canalloc(vmp, aneeded)) {
				/*
				 * Our import failed, but another thread
				 * added sufficient free memory to the arena
				 * to satisfy our request.  Go back and
				 * grab it.
				 */
				ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
				goto do_alloc;
			}
		}

		/*
		 * If the requestor chooses to fail the allocation attempt
		 * rather than reap, wait, and retry - get out of the loop.
		 */
		if (vmflag & VM_ABORT)
			break;
		mutex_exit(&vmp->vm_lock);
		if (vmp->vm_cflags & VMC_IDENTIFIER)
			kmem_reap_idspace();
		else
			kmem_reap();
		mutex_enter(&vmp->vm_lock);
		if (vmflag & VM_NOSLEEP)
			break;
		vmp->vm_kstat.vk_wait.value.ui64++;
		cv_wait(&vmp->vm_cv, &vmp->vm_lock);
	}
	if (vbest != NULL) {
		ASSERT(vbest->vs_type == VMEM_FREE);
		ASSERT(vbest->vs_knext != vbest);
		/* re-position to end of buffer */
		if (vmflag & VM_ENDALLOC) {
			addr += ((vbest->vs_end - (addr + size)) / align) *
			    align;
		}
		(void) vmem_seg_alloc(vmp, vbest, addr, size);
		mutex_exit(&vmp->vm_lock);
		if (xvaddr)
			vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
		ASSERT(P2PHASE(addr, align) == phase);
		ASSERT(!P2BOUNDARY(addr, size, nocross));
		ASSERT(addr >= (uintptr_t)minaddr);
		ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
		return ((void *)addr);
	}
	vmp->vm_kstat.vk_fail.value.ui64++;
	mutex_exit(&vmp->vm_lock);
	if (vmflag & VM_PANIC)
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "cannot satisfy mandatory allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);
	ASSERT(xvaddr == NULL);
	return (NULL);
}
/*
 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
 * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
 * both routines bypass the quantum caches.
 */
void
vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
{
	vmem_seg_t *vsp, *vnext, *vprev;

	mutex_enter(&vmp->vm_lock);

	vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
	vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);

	/*
	 * Attempt to coalesce with the next segment.
	 */
	vnext = vsp->vs_anext;
	if (vnext->vs_type == VMEM_FREE) {
		ASSERT(vsp->vs_end == vnext->vs_start);
		vmem_freelist_delete(vmp, vnext);
		vsp->vs_end = vnext->vs_end;
		vmem_seg_destroy(vmp, vnext);
	}

	/*
	 * Attempt to coalesce with the previous segment.
	 */
	vprev = vsp->vs_aprev;
	if (vprev->vs_type == VMEM_FREE) {
		ASSERT(vprev->vs_end == vsp->vs_start);
		vmem_freelist_delete(vmp, vprev);
		vprev->vs_end = vsp->vs_end;
		vmem_seg_destroy(vmp, vsp);
		vsp = vprev;
	}

	/*
	 * If the entire span is free, return it to the source.
	 */
	if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
	    vsp->vs_anext->vs_type == VMEM_SPAN) {
		vaddr = (void *)vsp->vs_start;
		size = VS_SIZE(vsp);
		ASSERT(size == VS_SIZE(vsp->vs_aprev));
		vmem_span_destroy(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
		vmp->vm_source_free(vmp->vm_source, vaddr, size);
	} else {
		vmem_freelist_insert(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
	}
}
/*
 * Allocate size bytes from arena vmp.  Returns the allocated address
 * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
 * and may also specify best-fit, first-fit, or next-fit allocation policy
 * instead of the default instant-fit policy.  VM_SLEEP allocations are
 * guaranteed to succeed.
 */
void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp;
	uintptr_t addr;
	int hb;
	int flist = 0;
	uint32_t mtbf;

	if (size - 1 < vmp->vm_qcache_max)
		return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
		    vmp->vm_qshift], vmflag & VM_KMFLAGS));

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	if (vmflag & VM_NEXTFIT)
		return (vmem_nextfit_alloc(vmp, size, vmflag));

	if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
		return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
		    NULL, NULL, vmflag));

	/*
	 * Unconstrained instant-fit allocation from the segment list.
	 */
	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
		if (ISP2(size))
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		else if ((hb = highbit(size)) < VMEM_FREELISTS)
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
	}

	if (flist <= 0) {
		mutex_exit(&vmp->vm_lock);
		return (vmem_xalloc(vmp, size, vmp->vm_quantum,
		    0, 0, NULL, NULL, vmflag));
	}

	ASSERT(size <= (1UL << flist));
	vsp = vmp->vm_freelist[flist].vs_knext;
	addr = vsp->vs_start;
	if (vmflag & VM_ENDALLOC) {
		addr += vsp->vs_end - (addr + size);
	}
	(void) vmem_seg_alloc(vmp, vsp, addr, size);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}
/*
 * Free the segment [vaddr, vaddr + size).
 */
void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
	if (size - 1 < vmp->vm_qcache_max)
		kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
		    vaddr);
	else
		vmem_xfree(vmp, vaddr, size);
}
/*
 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
 */
int
vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
{
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;

	mutex_enter(&vmp->vm_lock);
	vmp->vm_kstat.vk_contains.value.ui64++;
	for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
		vmp->vm_kstat.vk_contains_search.value.ui64++;
		ASSERT(vsp->vs_type == VMEM_SPAN);
		if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
			break;
	}
	mutex_exit(&vmp->vm_lock);
	return (vsp != seg0);
}
/*
 * Add the span [vaddr, vaddr + size) to arena vmp.
 */
void *
vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
{
	if (vaddr == NULL || size == 0)
		panic("vmem_add(%p, %p, %lu): bad arguments",
		    (void *)vmp, vaddr, size);

	ASSERT(!vmem_contains(vmp, vaddr, size));

	mutex_enter(&vmp->vm_lock);
	if (vmem_populate(vmp, vmflag))
		(void) vmem_span_create(vmp, vaddr, size, 0);
	else
		vaddr = NULL;
	mutex_exit(&vmp->vm_lock);
	return (vaddr);
}
/*
 * Walk the vmp arena, applying func to each segment matching typemask.
 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
 * call to func(); otherwise, it is held for the duration of vmem_walk()
 * to ensure a consistent snapshot.  Note that VMEM_REENTRANT callbacks
 * are *not* necessarily consistent, so they may only be used when a hint
 * is adequate.
 */
void
vmem_walk(vmem_t *vmp, int typemask,
    void (*func)(void *, void *, size_t), void *arg)
{
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;
	vmem_seg_t walker;

	if (typemask & VMEM_WALKER)
		return;

	bzero(&walker, sizeof (walker));
	walker.vs_type = VMEM_WALKER;

	mutex_enter(&vmp->vm_lock);
	VMEM_INSERT(seg0, &walker, a);
	for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
		if (vsp->vs_type & typemask) {
			void *start = (void *)vsp->vs_start;
			size_t size = VS_SIZE(vsp);
			if (typemask & VMEM_REENTRANT) {
				vmem_advance(vmp, &walker, vsp);
				mutex_exit(&vmp->vm_lock);
				func(arg, start, size);
				mutex_enter(&vmp->vm_lock);
				vsp = &walker;
			} else {
				func(arg, start, size);
			}
		}
	}
	vmem_advance(vmp, &walker, NULL);
	mutex_exit(&vmp->vm_lock);
}
/*
 * Return the total amount of memory whose type matches typemask.  Thus:
 *
 *	typemask VMEM_ALLOC yields total memory allocated (in use).
 *	typemask VMEM_FREE yields total memory free (available).
 *	typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
 */
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	uint64_t size = 0;

	if (typemask & VMEM_ALLOC)
		size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
	if (typemask & VMEM_FREE)
		size += vmp->vm_kstat.vk_mem_total.value.ui64 -
		    vmp->vm_kstat.vk_mem_inuse.value.ui64;
	return ((size_t)size);
}
/*
 * Create an arena called name whose initial span is [base, base + size).
 * The arena's natural unit of currency is quantum, so vmem_alloc()
 * guarantees quantum-aligned results.  The arena may import new spans
 * by invoking afunc() on source, and may return those spans by invoking
 * ffunc() on source.  To make small allocations fast and scalable,
 * the arena offers high-performance caching for each integer multiple
 * of quantum up to qcache_max.
 */
static vmem_t *
vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
    void *(*afunc)(vmem_t *, size_t, int),
    void (*ffunc)(vmem_t *, void *, size_t),
    vmem_t *source, size_t qcache_max, int vmflag)
{
	int i;
	size_t nqcache;
	vmem_t *vmp, *cur, **vmpp;
	vmem_seg_t *vsp;
	vmem_freelist_t *vfp;
	uint32_t id = atomic_inc_32_nv(&vmem_id);

	if (vmem_vmem_arena != NULL) {
		vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
		    vmflag & VM_KMFLAGS);
	} else {
		ASSERT(id <= VMEM_INITIAL);
		vmp = &vmem0[id - 1];
	}

	/* An identifier arena must inherit from another identifier arena */
	ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
	    (vmflag & VMC_IDENTIFIER)));

	if (vmp == NULL)
		return (NULL);
	bzero(vmp, sizeof (vmem_t));

	(void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
	mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
	vmp->vm_cflags = vmflag;
	vmflag &= VM_KMFLAGS;

	vmp->vm_quantum = quantum;
	vmp->vm_qshift = highbit(quantum) - 1;
	nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);

	for (i = 0; i <= VMEM_FREELISTS; i++) {
		vfp = &vmp->vm_freelist[i];
		vfp->vs_end = 1UL << i;
		vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
		vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
	}

	vmp->vm_freelist[0].vs_kprev = NULL;
	vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
	vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
	vmp->vm_hash_table = vmp->vm_hash0;
	vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);

	vsp = &vmp->vm_seg0;
	vsp->vs_anext = vsp;
	vsp->vs_aprev = vsp;
	vsp->vs_knext = vsp;
	vsp->vs_kprev = vsp;
	vsp->vs_type = VMEM_SPAN;

	vsp = &vmp->vm_rotor;
	vsp->vs_type = VMEM_ROTOR;
	VMEM_INSERT(&vmp->vm_seg0, vsp, a);

	bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));

	vmp->vm_id = id;
	if (source != NULL)
		vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
	vmp->vm_source = source;
	vmp->vm_source_alloc = afunc;
	vmp->vm_source_free = ffunc;

	/*
	 * Some arenas (like vmem_metadata and kmem_metadata) cannot
	 * use quantum caching to lower fragmentation.  Instead, we
	 * increase their imports, giving a similar effect.
	 */
	if (vmp->vm_cflags & VMC_NO_QCACHE) {
		vmp->vm_min_import =
		    VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
		nqcache = 0;
	}

	if (nqcache != 0) {
		ASSERT(!(vmflag & VM_NOSLEEP));
		vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
		for (i = 0; i < nqcache; i++) {
			char buf[VMEM_NAMELEN + 21];
			(void) sprintf(buf, "%s_%lu", vmp->vm_name,
			    (i + 1) * quantum);
			vmp->vm_qcache[i] = kmem_cache_create(buf,
			    (i + 1) * quantum, quantum, NULL, NULL, NULL,
			    NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
		}
	}

	if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
	    "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
		vmp->vm_ksp->ks_data = &vmp->vm_kstat;
		kstat_install(vmp->vm_ksp);
	}

	mutex_enter(&vmem_list_lock);
	vmpp = &vmem_list;
	while ((cur = *vmpp) != NULL)
		vmpp = &cur->vm_next;
	*vmpp = vmp;
	mutex_exit(&vmem_list_lock);

	if (vmp->vm_cflags & VMC_POPULATOR) {
		ASSERT(vmem_populators < VMEM_INITIAL);
		vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
		mutex_enter(&vmp->vm_lock);
		(void) vmem_populate(vmp, vmflag | VM_PANIC);
		mutex_exit(&vmp->vm_lock);
	}

	if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
		vmem_destroy(vmp);
		return (NULL);
	}

	return (vmp);
}
vmem_t *
vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
    vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
    size_t qcache_max, int vmflag)
{
	ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
	vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);

	return (vmem_create_common(name, base, size, quantum,
	    (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
	    vmflag | VMC_XALLOC));
}

vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum,
    vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
    size_t qcache_max, int vmflag)
{
	ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
	vmflag &= ~(VMC_XALLOC | VMC_XALIGN);

	return (vmem_create_common(name, base, size, quantum,
	    afunc, ffunc, source, qcache_max, vmflag));
}
/*
 * Destroy arena vmp.
 */
void
vmem_destroy(vmem_t *vmp)
{
	vmem_t *cur, **vmpp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;
	vmem_seg_t *vsp, *anext;
	size_t leaked;
	int i;

	mutex_enter(&vmem_list_lock);
	vmpp = &vmem_list;
	while ((cur = *vmpp) != vmp)
		vmpp = &cur->vm_next;
	*vmpp = vmp->vm_next;
	mutex_exit(&vmem_list_lock);

	for (i = 0; i < VMEM_NQCACHE_MAX; i++)
		if (vmp->vm_qcache[i])
			kmem_cache_destroy(vmp->vm_qcache[i]);

	leaked = vmem_size(vmp, VMEM_ALLOC);
	if (leaked != 0)
		cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
		    vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
		    "identifiers" : "bytes");

	if (vmp->vm_hash_table != vmp->vm_hash0)
		vmem_free(vmem_hash_arena, vmp->vm_hash_table,
		    (vmp->vm_hash_mask + 1) * sizeof (void *));

	/*
	 * Give back the segment structures for anything that's left in the
	 * arena, e.g. the primary spans and their free segments.
	 */
	VMEM_DELETE(&vmp->vm_rotor, a);
	for (vsp = seg0->vs_anext; vsp != seg0; vsp = anext) {
		anext = vsp->vs_anext;
		vmem_putseg_global(vsp);
	}

	while (vmp->vm_nsegfree > 0)
		vmem_putseg_global(vmem_getseg(vmp));

	kstat_delete(vmp->vm_ksp);

	mutex_destroy(&vmp->vm_lock);
	cv_destroy(&vmp->vm_cv);
	vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
}
/*
 * Only shrink vmem hashtable if it is 1<<vmem_rescale_minshift times (8x)
 * larger than necessary.
 */
int vmem_rescale_minshift = 3;

/*
 * Resize vmp's hash table to keep the average lookup depth near 1.0.
 */
static void
vmem_hash_rescale(vmem_t *vmp)
{
	vmem_seg_t **old_table, **new_table, *vsp;
	size_t old_size, new_size, h, nseg;

	nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
	    vmp->vm_kstat.vk_free.value.ui64);

	new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
	old_size = vmp->vm_hash_mask + 1;

	if ((old_size >> vmem_rescale_minshift) <= new_size &&
	    new_size <= (old_size << 1))
		return;

	new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
	    VM_NOSLEEP);
	if (new_table == NULL)
		return;
	bzero(new_table, new_size * sizeof (void *));

	mutex_enter(&vmp->vm_lock);

	old_size = vmp->vm_hash_mask + 1;
	old_table = vmp->vm_hash_table;

	vmp->vm_hash_mask = new_size - 1;
	vmp->vm_hash_table = new_table;
	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);

	for (h = 0; h < old_size; h++) {
		vsp = old_table[h];
		while (vsp != NULL) {
			uintptr_t addr = vsp->vs_start;
			vmem_seg_t *next_vsp = vsp->vs_knext;
			vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
			vsp->vs_knext = *hash_bucket;
			*hash_bucket = vsp;
			vsp = next_vsp;
		}
	}

	mutex_exit(&vmp->vm_lock);

	if (old_table != vmp->vm_hash0)
		vmem_free(vmem_hash_arena, old_table,
		    old_size * sizeof (void *));
}
/*
 * Perform periodic maintenance on all vmem arenas.
 */
void
vmem_update(void *dummy)
{
	vmem_t *vmp;

	mutex_enter(&vmem_list_lock);
	for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
		/*
		 * If threads are waiting for resources, wake them up
		 * periodically so they can issue another kmem_reap()
		 * to reclaim resources cached by the slab allocator.
		 */
		cv_broadcast(&vmp->vm_cv);

		/*
		 * Rescale the hash table to keep the hash chains short.
		 */
		vmem_hash_rescale(vmp);
	}
	mutex_exit(&vmem_list_lock);

	(void) timeout(vmem_update, dummy, vmem_update_interval * hz);
}
void
vmem_qcache_reap(vmem_t *vmp)
{
	int i;

	/*
	 * Reap any quantum caches that may be part of this vmem.
	 */
	for (i = 0; i < VMEM_NQCACHE_MAX; i++)
		if (vmp->vm_qcache[i])
			kmem_cache_reap_now(vmp->vm_qcache[i]);
}
/*
 * Prepare vmem for use.
 */
vmem_t *
vmem_init(const char *heap_name,
    void *heap_start, size_t heap_size, size_t heap_quantum,
    void *(*heap_alloc)(vmem_t *, size_t, int),
    void (*heap_free)(vmem_t *, void *, size_t))
{
	uint32_t id;
	int nseg = VMEM_SEG_INITIAL;
	vmem_t *heap;

	while (--nseg >= 0)
		vmem_putseg_global(&vmem_seg0[nseg]);

	heap = vmem_create(heap_name,
	    heap_start, heap_size, heap_quantum,
	    NULL, NULL, NULL, 0,
	    VM_SLEEP | VMC_POPULATOR);

	vmem_metadata_arena = vmem_create("vmem_metadata",
	    NULL, 0, heap_quantum,
	    vmem_alloc, vmem_free, heap, 8 * heap_quantum,
	    VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);

	vmem_seg_arena = vmem_create("vmem_seg",
	    NULL, 0, heap_quantum,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);

	vmem_hash_arena = vmem_create("vmem_hash",
	    NULL, 0, 8,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP);

	vmem_vmem_arena = vmem_create("vmem_vmem",
	    vmem0, sizeof (vmem0), 1,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP);

	for (id = 0; id < vmem_id; id++)
		(void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
		    1, 0, 0, &vmem0[id], &vmem0[id + 1],
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	return (heap);
}