/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennisszhou@gmail.com>
 *
 * This file is released under the GPLv2 license.
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * tries to allocate from the fullest chunk first.  Each chunk is managed
 * by a bitmap with metadata blocks.  The allocation map is updated on
 * every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
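/*
 * Illustrative example of the byte/bit conversion above: assuming
 * PCPU_MIN_ALLOC_SIZE is 4 bytes, a 512-byte area is tracked by
 * 512 / 4 = 128 bits of the allocation map, and one 4KB page
 * corresponds to 1024 bits.  The conversion helpers further down
 * operate on exactly these units.
 */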
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"
/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4
#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
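/*
 * Note that the two default macros above are exact inverses: for a
 * percpu pointer p, __addr_to_pcpu_ptr(__pcpu_ptr_to_addr(p)) == p.
 * The translation simply rebases an address between the first chunk's
 * base address and the static percpu section at __per_cpu_start.
 */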
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init;	/* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;
/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}
/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk->contig_bits == 0)
		return 0;

	return pcpu_size_to_slot(chunk->free_bytes);
}
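/*
 * Worked example of the slot math above: a chunk with 12288 bytes free
 * has fls(12288) == 14 and so lands in slot
 * max(14 - PCPU_SLOT_BASE_SHIFT + 2, 1) == 11, while a fully free chunk
 * (free_bytes == pcpu_unit_size) always sits in the last slot,
 * pcpu_nr_slots - 1.
 */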
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}
static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(bitmap, end, *rs);
	*re = find_next_bit(bitmap, end, *rs + 1);
}

static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
/*
 * Bitmap region iterators.  Iterates over the bitmap between
 * [@start, @end) in @chunk.  @rs and @re should be integer variables
 * and will be set to start and end index of the current free region.
 */
#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
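/*
 * Example usage, the same pattern as in the population paths below.
 * To walk the unpopulated page runs of a chunk:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages)
 *		pr_debug("pages [%d, %d) are not populated\n", rs, re);
 *
 * Each iteration yields a maximal run [rs, re) of clear bits.
 */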
/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
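/*
 * For example, assuming PCPU_BITMAP_BLOCK_BITS is 1024, chunk offset
 * 2500 maps to block index 2 (pcpu_off_to_block_index) at block offset
 * 452 (pcpu_off_to_block_off), and pcpu_block_off_to_off(2, 452)
 * recovers 2500.
 */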
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}
/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			*bits += alloc_bits + block->contig_hint_start -
				 block->first_free;
			*bit_off = pcpu_block_off_to_off(i, block->first_free);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}
/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp | GFP_KERNEL);
	else
		return __vmalloc(size, gfp | GFP_KERNEL | __GFP_ZERO,
				 PAGE_KERNEL);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}
/**
 * pcpu_cnt_pop_pages - counts populated backing pages in range
 * @chunk: chunk of interest
 * @bit_off: start offset
 * @bits: size of area to check
 *
 * Calculates the number of populated pages in the region
 * [page_start, page_end).  This keeps track of how many empty populated
 * pages are available and helps decide if async work should be scheduled.
 *
 * RETURNS:
 * The nr of populated pages.
 */
static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
				     int bits)
{
	int page_start = PFN_UP(bit_off * PCPU_MIN_ALLOC_SIZE);
	int page_end = PFN_DOWN((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	if (page_start >= page_end)
		return 0;

	/*
	 * bitmap_weight counts the number of bits set in a bitmap up to
	 * the specified number of bits.  This is counting the populated
	 * pages up to page_end and then subtracting the populated pages
	 * up to page_start to count the populated pages in
	 * [page_start, page_end).
	 */
	return bitmap_weight(chunk->populated, page_end) -
	       bitmap_weight(chunk->populated, page_start);
}
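/*
 * Worked example, assuming 4KB pages and 4-byte allocation units:
 * bit_off 1024 and bits 3072 cover the byte range [4KB, 16KB), i.e.
 * the fully-contained pages [1, 4); the result is then the number of
 * set bits in chunk->populated within that page range.
 */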
/**
 * pcpu_chunk_update - updates the chunk metadata given a free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * This updates the chunk's contig hint and starting offset given a free area.
 * Choose the best starting offset if the contig hint is equal.
 */
static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
{
	if (bits > chunk->contig_bits) {
		chunk->contig_bits_start = bit_off;
		chunk->contig_bits = bits;
	} else if (bits == chunk->contig_bits && chunk->contig_bits_start &&
		   (!bit_off ||
		    __ffs(bit_off) > __ffs(chunk->contig_bits_start))) {
		/* use the start with the best alignment */
		chunk->contig_bits_start = bit_off;
	}
}
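/*
 * The __ffs() comparison above prefers the better-aligned start on a
 * tie: e.g. for equal-sized free areas starting at bit 96 (__ffs == 5)
 * and bit 72 (__ffs == 3), the one at 96 is kept because it can satisfy
 * larger alignment requests.
 */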
/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * It also counts the populated pages and uses the delta to update the
 * global count.
 *
 * Updates:
 *      chunk->contig_bits
 *      chunk->contig_bits_start
 *      nr_empty_pop_pages (chunk and global)
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
{
	int bit_off, bits, nr_empty_pop_pages;

	/* clear metadata */
	chunk->contig_bits = 0;

	bit_off = chunk->first_bit;
	bits = nr_empty_pop_pages = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits) {
		pcpu_chunk_update(chunk, bit_off, bits);

		nr_empty_pop_pages += pcpu_cnt_pop_pages(chunk, bit_off, bits);
	}

	/*
	 * Keep track of nr_empty_pop_pages.
	 *
	 * The chunk maintains the previous number of free pages it held,
	 * so the delta is used to update the global counter.  The reserved
	 * chunk is not part of the free page count as its pages are populated
	 * at init and are special to serving reserved allocations.
	 */
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages +=
			(nr_empty_pop_pages - chunk->nr_empty_pop_pages);

	chunk->nr_empty_pop_pages = nr_empty_pop_pages;
}
/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == PCPU_BITMAP_BLOCK_BITS)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (block->contig_hint_start && contig == block->contig_hint &&
		   (!start || __ffs(start) > __ffs(block->contig_hint_start))) {
		/* use the start with the best alignment */
		block->contig_hint_start = start;
	}
}
/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	int rs, re;	/* region start, region end */

	/* clear hints */
	block->contig_hint = 0;
	block->left_free = block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	pcpu_for_each_unpop_region(alloc_map, rs, re, block->first_free,
				   PCPU_BITMAP_BLOCK_BITS) {
		pcpu_block_update(block, rs, re);
	}
}
/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (s_off >= s_block->contig_hint_start &&
	    s_off < s_block->contig_hint_start + s_block->contig_hint) {
		/* block contig hint is broken - scan to fix it */
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			}

			e_block->left_free = 0;
			e_block->right_free =
				min_t(int, e_block->right_free,
				      PCPU_BITMAP_BLOCK_BITS - e_off);
		}

		/* update in-between md_blocks */
		for (block = s_block + 1; block < e_block; block++) {
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (bit_off >= chunk->contig_bits_start &&
	    bit_off < chunk->contig_bits_start + chunk->contig_bits)
		pcpu_chunk_refresh_hint(chunk);
}
/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk->contig_bits.  chunk->contig_bits
 * may be off by up to a page, but it will never be more than the available
 * space.  If the contig hint is contained in one block, it will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	pcpu_block_update(s_block, start, e_off);

	/* the free spans multiple blocks */
	if (s_index != e_index) {
		/* update e_block */
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	/*
	 * Refresh chunk metadata when the free makes a page free, a block
	 * free, or spans across blocks.  The contig hint may be off by up to
	 * a page, but if the hint is contained in a block, it will be accurate
	 * with the else condition below.
	 */
	if ((ALIGN_DOWN(end, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS)) >
	     ALIGN(start, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS))) ||
	    s_index != e_index)
		pcpu_chunk_refresh_hint(chunk);
	else
		pcpu_chunk_update(chunk, pcpu_block_off_to_off(s_index, start),
				  s_block->contig_hint);
}
/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * True if the backing pages are populated.
 * @next_off is set to skip over unpopulated regions in pcpu_find_block_fit().
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}
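/*
 * For example, if the requested region touches pages [0, 3) but page 1
 * is not populated, the unpopulated run found above is [1, 2) and
 * *next_off is set to the first allocation unit of page 2, letting
 * pcpu_find_block_fit() resume its search past the hole.
 */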
/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	int bit_off, bits, next_off;

	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning by assuming if it
	 * cannot fit in the global hint, there is memory pressure and creating
	 * a new chunk would happen soon.
	 */
	bit_off = ALIGN(chunk->contig_bits_start, align) -
		  chunk->contig_bits_start;
	if (bit_off + alloc_bits > chunk->contig_bits)
		return -1;

	bit_off = chunk->first_bit;
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}
/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	size_t align_mask = (align) ? (align - 1) : 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS;
	bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
					     alloc_bits, align_mask);
	if (bit_off >= end)
		return -1;

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk->first_bit)
		chunk->first_bit = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}
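/*
 * The boundary map above encodes allocation sizes implicitly: for an
 * allocation of @alloc_bits at @bit_off, the bits at @bit_off and
 * @bit_off + @alloc_bits are set and everything in between is cleared,
 * so pcpu_free_area() below can recover the size with a single
 * find_next_bit() over chunk->bound_map.
 */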
/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	int bit_off, bits, end, oslot;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	/* update metadata */
	chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	chunk->first_bit = min(chunk->first_bit, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);
}
static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++) {
		md_block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
		md_block->left_free = PCPU_BITMAP_BLOCK_BITS;
		md_block->right_free = PCPU_BITMAP_BLOCK_BITS;
	}
}
/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.  The
 * base_addr is @tmp_addr rounded down to a page boundary while the region end
 * is rounded up.  Offsets are kept track of to determine the region served.
 * All this is done to appease the bitmap allocator in avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr, lcm_align;
	int start_offset, offset_bits, region_size, region_bits;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	/*
	 * Align the end of the region with the LCM of PAGE_SIZE and
	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
	 * the other, so we can just use the LCM.
	 */
	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
	region_size = ALIGN(start_offset + map_size, lcm_align);

	/* allocate chunk */
	chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
				    BITS_TO_LONGS(region_size >> PAGE_SHIFT),
				    0);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
					       sizeof(chunk->alloc_map[0]), 0);
	chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
					       sizeof(chunk->bound_map[0]), 0);
	chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
					       sizeof(chunk->md_blocks[0]), 0);
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages =
		pcpu_cnt_pop_pages(chunk, start_offset / PCPU_MIN_ALLOC_SIZE,
				   map_size / PCPU_MIN_ALLOC_SIZE);

	chunk->contig_bits = map_size / PCPU_MIN_ALLOC_SIZE;
	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->first_bit = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
	}

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map,
			   pcpu_chunk_map_bits(chunk) - offset_bits,
			   offset_bits);
		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
			chunk->bound_map);
		set_bit(region_bits, chunk->bound_map);

		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
					     - offset_bits, offset_bits);
	}

	return chunk;
}
static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]), gfp);
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]), gfp);
	if (!chunk->bound_map)
		goto bound_map_fail;

	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
					   sizeof(chunk->md_blocks[0]), gfp);
	if (!chunk->md_blocks)
		goto md_blocks_fail;

	pcpu_init_md_blocks(chunk);

	/* init metadata */
	chunk->contig_bits = region_bits;
	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;

	return chunk;

md_blocks_fail:
	pcpu_mem_free(chunk->bound_map);
bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);

	return NULL;
}
static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->md_blocks);
	pcpu_mem_free(chunk->bound_map);
	pcpu_mem_free(chunk->alloc_map);
	pcpu_mem_free(chunk);
}
/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 * @for_alloc: if this is to populate for allocation
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 *
 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
 * is to serve an allocation in that area.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end, bool for_alloc)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;

	if (!for_alloc) {
		chunk->nr_empty_pop_pages += nr;
		pcpu_nr_empty_pop_pages += nr;
	}
}
/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	chunk->nr_empty_pop_pages -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}
/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the backing page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size,
			       gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif
/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.  If @gfp has __GFP_NOWARN
 * then no warning will be triggered on invalid or failed allocation
 * requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	bool do_warn = !(gfp & __GFP_NOWARN);
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;
	size_t bits, bit_align;

	/*
	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
	 * therefore alignment must be a minimum of that many bytes.
	 * An allocation may have internal fragmentation from rounding up
	 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
	 */
	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
		align = PCPU_MIN_ALLOC_SIZE;

	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
	bits = size >> PCPU_MIN_ALLOC_SHIFT;
	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
		if (off < 0) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		off = pcpu_alloc_area(chunk, bits, bit_align, off);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			off = pcpu_find_block_fit(chunk, bits, bit_align,
						  is_atomic);
			if (off < 0)
				continue;

			off = pcpu_alloc_area(chunk, bits, bit_align, off);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk(0);
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk->populated, rs, re,
					   page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re, 0);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re, true);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
				  chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && do_warn && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}
/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.  If @gfp
 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
 * allocation requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
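/*
 * Illustrative usage sketch (most callers use the alloc_percpu(type)
 * wrapper from linux/percpu.h instead of calling this directly):
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt) {
 *		unsigned int cpu;
 *		int sum = 0;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(cnt, cpu);
 *		free_percpu(cnt);
 *	}
 *
 * The returned area is zero-filled, so the sum above is 0.
 */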
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}
/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.  This is also
 * responsible for maintaining the pool of empty populated pages.  However,
 * it is possible that this is called when physical memory is scarce causing
 * OOM killer to be triggered.  We should avoid doing so until an actual
 * allocation causes the failure as it is possible that requests can be
 * serviced from already backed regions.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	/* gfp flags passed to underlying allocators */
	const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
					 chunk->nr_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/*
	 * Ensure there is a certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = chunk->nr_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
					   chunk->nr_pages) {
			int nr = min(re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr, false);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk(gfp);
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_bytes == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				pcpu_schedule_balance_work();
				break;
			}
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}
/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}
/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 *
	 * The address check is against full chunk sizes.  pcpu_base_addr
	 * points to the beginning of the first chunk including the
	 * static region.  Assumes good intent as the first chunk may
	 * not be full (ie. < pcpu_unit_pages in size).
	 */
	first_low = (unsigned long)pcpu_base_addr +
		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
	first_high = (unsigned long)pcpu_base_addr +
		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}
/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}
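
/*
 * Usage sketch: the expected lifetime of an alloc_info in arch setup
 * code - build it, hand it to pcpu_setup_first_chunk() and free it
 * afterwards.  The helpers further down (pcpu_embed_first_chunk() and
 * pcpu_page_first_chunk()) follow exactly this pattern:
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(1, nr_units);
 *
 *	if (!ai)
 *		return -ENOMEM;
 *	... fill in ai and map the first chunk at base_addr ...
 *	rc = pcpu_setup_first_chunk(ai, base_addr);
 *	pcpu_free_alloc_info(ai);
 */
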
/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}
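
/*
 * With made-up sizes - four possible cpus in one group, one unit per
 * allocation atom - the dump above produces dmesg lines along the
 * lines of (unused units would show as truncated "-" entries):
 *
 *	pcpu-alloc: s163840 r8192 d352256 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 */
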
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * The first chunk will always contain a static and a dynamic region.
 * However, the static region is not managed by any chunk.  If the first
 * chunk also contains a reserved region, it is served by two chunks -
 * one for the reserved region and one for the dynamic region.  They
 * share the same vm, but use offset regions in the area allocation map.
 * The chunk serving the dynamic region is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	size_t static_size, dyn_size;
	struct pcpu_chunk *chunk;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;
	int map_size;
	unsigned long tmp_addr;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(!ai->dyn_size);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	pcpu_stats_save_ai(ai);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * The end of the static region needs to be aligned with the
	 * minimum allocation size as this offsets the reserved and
	 * dynamic region.  The first chunk ends page aligned by
	 * expanding the dynamic region, therefore the dynamic region
	 * can be shrunk to compensate while still staying above the
	 * configured sizes.
	 */
	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
	dyn_size = ai->dyn_size - (static_size - ai->static_size);

	/*
	 * Initialize first chunk.
	 * If the reserved_size is non-zero, this initializes the reserved
	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
	 * and the dynamic region is initialized here.  The first chunk,
	 * pcpu_first_chunk, will always point to the chunk that serves
	 * the dynamic region.
	 */
	tmp_addr = (unsigned long)base_addr + static_size;
	map_size = ai->reserved_size ?: dyn_size;
	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);

	/* init dynamic chunk if necessary */
	if (ai->reserved_size) {
		pcpu_reserved_chunk = chunk;

		tmp_addr = (unsigned long)base_addr + static_size +
			   ai->reserved_size;
		map_size = dyn_size;
		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
	}

	/* link the first chunk in */
	pcpu_first_chunk = chunk;
	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(base_addr);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}
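
/*
 * Example (made-up sizes): with an already aligned static_size of 64k,
 * reserved_size of 8k and dyn_size of 20k, the first chunk at
 * base_addr ends up laid out as
 *
 *	base_addr +  0k: static region (not managed by any chunk)
 *	base_addr + 64k: reserved region (pcpu_reserved_chunk, 8k)
 *	base_addr + 72k: dynamic region (pcpu_first_chunk, 20k)
 *
 * and only the chunk serving the dynamic region is circulated in
 * pcpu_slot for general consumption.
 */
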
const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
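
/*
 * Example: booting with "percpu_alloc=page" makes arch setup code that
 * consults pcpu_chosen_fc (x86, for instance) pick the page-based
 * first chunk allocator below instead of the default embed one.
 */
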
/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			     max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/* determine the maximum # of units that can fit in an allocation */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * Related to atom_size, which could be much larger than the unit_size.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
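
/*
 * Worked example for the unit sizing in pcpu_build_alloc_info() above
 * (made-up sizes): with atom_size = 2M and min_unit_size = 640k,
 * alloc_size = roundup(640k, 2M) = 2M and upa starts at 2M / 640k = 3.
 * 2M % 3 != 0, so upa drops to 2, which divides 2M evenly into page
 * aligned 1M units; max_upa = 2 and each unit ends up 1M.
 */
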
#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_virt_alloc_nopanic(areas_size, 0);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
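
/*
 * Usage sketch: a NUMA-aware arch would typically call this as
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    pcpu_cpu_distance, pcpu_fc_alloc,
 *				    pcpu_fc_free);
 *
 * where pcpu_cpu_distance(), pcpu_fc_alloc() and pcpu_fc_free() stand
 * in for arch helpers that report node distance and allocate/free
 * node-local bootmem (the names mirror the x86 setup code).
 */
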
#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_virt_alloc(pages_size, 0);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */
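
/*
 * Usage sketch: an arch selecting this path supplies a pte populator
 * along with its allocator callbacks, e.g. (names mirror the x86
 * helpers and stand in for arch-specific functions):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   pcpu_fc_alloc, pcpu_fc_free,
 *				   pcpup_populate_pte);
 */
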
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_virt_alloc_from_nopanic(
			size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
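
/*
 * With the offsets above in place, the generic percpu accessors reduce
 * to simple pointer arithmetic; roughly (a sketch of what the
 * asm-generic helpers do, not a verbatim copy):
 *
 *	#define per_cpu_ptr(ptr, cpu)					\
 *		SHIFT_PERCPU_PTR((ptr), __per_cpu_offset[(cpu)])
 */
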
#else /* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_virt_alloc_from_nopanic(unit_size,
					      PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif /* CONFIG_SMP */

/*
 * Percpu allocator is initialized early during boot when neither slab nor
 * workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);