// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  Ie., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware the __GFP_ACCOUNT
 * flag should be passed.  All memcg-aware allocations share one set of
 * chunks, while all unaccounted allocations and allocations performed by
 * processes belonging to the root memory cgroup use the second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
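
/*
 * Illustrative sketch (editorial addition, not part of the allocator): the
 * byte <-> bit <-> block conversions described above, assuming the
 * PCPU_MIN_ALLOC_SHIFT and PCPU_BITMAP_BLOCK_BITS definitions from
 * percpu-internal.h.  For a hypothetical 48-byte request with
 * PCPU_MIN_ALLOC_SIZE == 4:
 *
 *	bits   = 48 >> PCPU_MIN_ALLOC_SHIFT;             // 12 allocation units
 *	index  = bit_off / PCPU_BITMAP_BLOCK_BITS;       // metadata block index
 *	offset = bit_off & (PCPU_BITMAP_BLOCK_BITS - 1); // offset inside block
 */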

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
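
/*
 * Usage sketch (editorial addition): on SMP the two default macros above are
 * inverses of each other, so for any dynamically allocated percpu pointer:
 *
 *	void __percpu *p = ...;
 *	WARN_ON(__addr_to_pcpu_ptr(__pcpu_ptr_to_addr(p)) != p);
 */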

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
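
/*
 * Worked example (editorial addition): with PCPU_SLOT_BASE_SHIFT == 5, a size
 * of 1024 bytes gives fls(1024) == 11, so __pcpu_size_to_slot() returns
 * max(11 - 5 + 2, 1) == 8.  A chunk whose remaining free space equals
 * pcpu_unit_size is always kept in the last slot (pcpu_nr_slots - 1) by
 * pcpu_size_to_slot() above.
 */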

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
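
/*
 * Worked example (editorial addition, assuming PAGE_SIZE == 4096 and
 * PCPU_MIN_ALLOC_SIZE == 4 so that PCPU_BITMAP_BLOCK_BITS == 1024): a chunk
 * bit offset of 2500 maps to block index 2500 / 1024 == 2 and block offset
 * 2500 & 1023 == 452, and pcpu_block_off_to_off(2, 452) recovers 2500.
 */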

/**
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				       \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			       \
	     (bit_off) += (bits),					       \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
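
/*
 * Usage sketch (editorial addition): a typical consumer walks every free
 * area of a chunk like this, with bit_off/bits as plain ints declared by the
 * caller.  This mirrors how pcpu_chunk_refresh_hint() below uses the
 * iterator:
 *
 *	int bit_off = 0, bits = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pcpu_block_update(&chunk->chunk_md, bit_off, bit_off + bits);
 */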

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/**
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
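
/*
 * Worked example (editorial addition): hint region [2, 6) and allocated
 * region [5, 9) overlap because 2 < 9 && 5 < 6, whereas [2, 5) and [5, 9)
 * do not, since the regions are half-open and only share the boundary.
 */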

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

/**
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* the free spans more than one block */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * next_index is to skip over unpopulated blocks in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, next_off;

	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning by assuming if it
	 * cannot fit in the global hint, there is memory pressure and creating
	 * a new chunk would happen soon.
	 */
	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
		  chunk_md->contig_hint_start;
	if (bit_off + alloc_bits > chunk_md->contig_hint)
		return -1;

	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

/*
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped.  This is imperfect, but in general is
 * good enough.  The largest remembered region is the largest failed region
 * seen.  This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment.  While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
static unsigned long pcpu_find_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned long nr,
					 unsigned long align_mask,
					 unsigned long *largest_off,
					 unsigned long *largest_bits)
{
	unsigned long index, end, i, area_off, area_bits;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);
	area_off = index;

	end = index + nr;
	if (end > size)
		return end;
	i = find_next_bit(map, end, index);
	if (i < end) {
		area_bits = i - area_off;
		/* remember largest unused area with best alignment */
		if (area_bits > *largest_bits ||
		    (area_bits == *largest_bits && *largest_off &&
		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
			*largest_off = area_off;
			*largest_bits = area_bits;
		}

		start = i + 1;
		goto again;
	}
	return index;
}

/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	size_t align_mask = (align) ? (align - 1) : 0;
	unsigned long area_off = 0, area_bits = 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
		    pcpu_chunk_map_bits(chunk));
	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
				      align_mask, &area_off, &area_bits);
	if (bit_off >= end)
		return -1;

	if (area_bits)
		pcpu_block_update_scan(chunk, area_off, area_bits);

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk_md->first_free)
		chunk_md->first_free = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}
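
/*
 * Boundary map convention (editorial addition, worked example): after
 * pcpu_alloc_area() returns byte offset off for alloc_bits units, bound_map
 * has a set bit at off / PCPU_MIN_ALLOC_SIZE and at that index + alloc_bits,
 * with the bits in between cleared.  pcpu_free_area() below recovers the
 * allocation size by searching for the next set boundary bit.
 */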

/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 *
 * RETURNS:
 * Number of freed bytes.
 */
static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, end, oslot, freed;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	freed = bits * PCPU_MIN_ALLOC_SIZE;

	/* update metadata */
	chunk->free_bytes += freed;

	/* update first free bit */
	chunk_md->first_free = min(chunk_md->first_free, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);

	return freed;
}

static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
	block->scan_hint = 0;
	block->contig_hint = nr_bits;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	/* init the chunk's block */
	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++)
		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.  The
 * base_addr is page aligned down of @tmp_addr while the region end is page
 * aligned up.  Offsets are kept track of to determine the region served. All
 * this is done to appease the bitmap allocator in avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr, lcm_align;
	int start_offset, offset_bits, region_size, region_bits;
	size_t alloc_size;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	/*
	 * Align the end of the region with the LCM of PAGE_SIZE and
	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
	 * the other.
	 */
	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
	region_size = ALIGN(start_offset + map_size, lcm_align);

	/* allocate chunk */
	alloc_size = struct_size(chunk, populated,
				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->alloc_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size =
		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->bound_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->md_blocks)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

#ifdef CONFIG_MEMCG_KMEM
	/* first chunk isn't memcg-aware */
	chunk->obj_cgroups = NULL;
#endif
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages = chunk->nr_pages;

	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->chunk_md.first_free = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
	}

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map,
			   pcpu_chunk_map_bits(chunk) - offset_bits,
			   offset_bits);
		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
			chunk->bound_map);
		set_bit(region_bits, chunk->bound_map);

		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
					     - offset_bits, offset_bits);
	}

	return chunk;
}

static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]), gfp);
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]), gfp);
	if (!chunk->bound_map)
		goto bound_map_fail;

	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
					   sizeof(chunk->md_blocks[0]), gfp);
	if (!chunk->md_blocks)
		goto md_blocks_fail;

#ifdef CONFIG_MEMCG_KMEM
	if (pcpu_is_memcg_chunk(type)) {
		chunk->obj_cgroups =
			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
					sizeof(struct obj_cgroup *), gfp);
		if (!chunk->obj_cgroups)
			goto objcg_fail;
	}
#endif

	pcpu_init_md_blocks(chunk);

	/* init metadata */
	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;

	return chunk;

#ifdef CONFIG_MEMCG_KMEM
objcg_fail:
	pcpu_mem_free(chunk->md_blocks);
#endif
md_blocks_fail:
	pcpu_mem_free(chunk->bound_map);
bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);

	return NULL;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
#ifdef CONFIG_MEMCG_KMEM
	pcpu_mem_free(chunk->obj_cgroups);
#endif
	pcpu_mem_free(chunk->md_blocks);
	pcpu_mem_free(chunk->bound_map);
	pcpu_mem_free(chunk->alloc_map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_populated += nr;

	pcpu_update_empty_pages(chunk, nr);
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_populated -= nr;

	pcpu_update_empty_pages(chunk, -nr);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
					    gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

#ifdef CONFIG_MEMCG_KMEM
static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
						     struct obj_cgroup **objcgp)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
		return PCPU_CHUNK_ROOT;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return PCPU_CHUNK_ROOT;

	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
		obj_cgroup_put(objcg);
		return PCPU_FAIL_ALLOC;
	}

	*objcgp = objcg;
	return PCPU_CHUNK_MEMCG;
}

static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
				       struct pcpu_chunk *chunk, int off,
				       size_t size)
{
	if (!objcg)
		return;

	if (chunk) {
		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;

		rcu_read_lock();
		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
				size * num_possible_cpus());
		rcu_read_unlock();
	} else {
		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
		obj_cgroup_put(objcg);
	}
}

static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
	struct obj_cgroup *objcg;

	if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
		return;

	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;

	obj_cgroup_uncharge(objcg, size * num_possible_cpus());

	rcu_read_lock();
	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
			-(size * num_possible_cpus()));
	rcu_read_unlock();

	obj_cgroup_put(objcg);
}

#else /* CONFIG_MEMCG_KMEM */
static enum pcpu_chunk_type
pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
{
	return PCPU_CHUNK_ROOT;
}

static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
				       struct pcpu_chunk *chunk, int off,
				       size_t size)
{
}

static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.  If @gfp has __GFP_NOWARN
 * then no warning will be triggered on invalid or failed allocation
 * requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	gfp_t pcpu_gfp;
	bool is_atomic;
	bool do_warn;
	enum pcpu_chunk_type type;
	struct list_head *pcpu_slot;
	struct obj_cgroup *objcg = NULL;
	static int warn_limit = 10;
	struct pcpu_chunk *chunk, *next;
	const char *err;
	int slot, off, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;
	size_t bits, bit_align;

	gfp = current_gfp_context(gfp);
	/* whitelisted flags that can be passed to the backing allocators */
	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	do_warn = !(gfp & __GFP_NOWARN);

	/*
	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
	 * therefore alignment must be a minimum of that many bytes.
	 * An allocation may have internal fragmentation from rounding up
	 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
	 */
	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
		align = PCPU_MIN_ALLOC_SIZE;

	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
	bits = size >> PCPU_MIN_ALLOC_SHIFT;
	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
	if (unlikely(type == PCPU_FAIL_ALLOC))
		return NULL;
	pcpu_slot = pcpu_chunk_list(type);

	if (!is_atomic) {
		/*
		 * pcpu_balance_workfn() allocates memory under this mutex,
		 * and it may wait for memory reclaim.  Allow current task
		 * to become OOM victim, in case of memory pressure.
		 */
		if (gfp & __GFP_NOFAIL) {
			mutex_lock(&pcpu_alloc_mutex);
		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
			return NULL;
		}
	}

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
		if (off < 0) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		off = pcpu_alloc_area(chunk, bits, bit_align, off);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
			off = pcpu_find_block_fit(chunk, bits, bit_align,
						  is_atomic);
			if (off < 0) {
				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
					pcpu_chunk_move(chunk, 0);
				continue;
			}

			off = pcpu_alloc_area(chunk, bits, bit_align, off);
			if (off >= 0)
				goto area_found;

		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk(type, pcpu_gfp);
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		unsigned int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		bitmap_for_each_clear_region(chunk->populated, rs, re,
					     page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
			chunk->base_addr, off, ptr);

	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && do_warn && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}

	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);

	return NULL;
}
/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.  If @gfp
 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
 * allocation requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}
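/*
 * Example (illustrative sketch): the module loader is the intended user of
 * the reserved-area interface.  When loading a module with static percpu
 * variables it does roughly the following (see percpu_modalloc() in
 * kernel/module.c for the real code):
 *
 *	mod->percpu = __alloc_reserved_percpu(size, align);
 *	if (!mod->percpu)
 *		return -ENOMEM;
 *
 * Keeping module percpu symbols inside the reserved region keeps them within
 * the limited relocation range described above.
 */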
/**
 * __pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @type: chunk type
 *
 * Reclaim all fully free chunks except for the first one.  This is also
 * responsible for maintaining the pool of empty populated pages.  However,
 * it is possible that this is called when physical memory is scarce causing
 * OOM killer to be triggered.  We should avoid doing so until an actual
 * allocation causes the failure as it is possible that requests can be
 * serviced from already backed regions.
 */
static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
{
	/* gfp flags passed to underlying allocators */
	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	LIST_HEAD(to_free);
	struct list_head *pcpu_slot = pcpu_chunk_list(type);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;
	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		unsigned int rs, re;

		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
					   chunk->nr_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}
	/*
	 * Ensure there is a certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * expensive.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}
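	/*
	 * Worked example (assuming the default PCPU_EMPTY_POP_PAGES_HIGH of
	 * 4): if only 1 empty populated page is currently available, the
	 * clamp above yields nr_to_pop = clamp(4 - 1, 0, 4) = 3, i.e. the
	 * worker tops the pool back up to the high watermark.
	 */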
	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		unsigned int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = chunk->nr_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
					     chunk->nr_pages) {
			int nr = min_t(int, re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}
	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk(type, gfp);
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}
/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Call __pcpu_balance_workfn() for each chunk type.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	enum pcpu_chunk_type type;

	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
		__pcpu_balance_workfn(type);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int size, off;
	bool need_balance = false;
	struct list_head *pcpu_slot;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	size = pcpu_free_area(chunk, off);

	pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));

	pcpu_memcg_free_hook(chunk, off, size);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_bytes == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				need_balance = true;
				break;
			}
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);

	if (need_balance)
		pcpu_schedule_balance_work();
}
EXPORT_SYMBOL_GPL(free_percpu);
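/*
 * Example (illustrative sketch): a typical allocate / per-cpu update / fold /
 * free cycle.  "cnt" is a hypothetical dynamically allocated counter.
 *
 *	u64 __percpu *cnt = alloc_percpu(u64);
 *	u64 sum = 0;
 *	int cpu;
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	this_cpu_inc(*cnt);
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(cnt, cpu);
 *	free_percpu(cnt);
 *
 * free_percpu() itself may run from atomic context; the costly chunk reclaim
 * is deferred to the balance worker above.
 */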
bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}
/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}
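/*
 * Example (illustrative): callers use this to classify an arbitrary kernel
 * address, e.g. lockdep-style "is this a static object?" checks.
 *
 *	if (is_kernel_percpu_address(addr))
 *		return true;
 *
 * A true result means @addr falls inside the in-kernel DEFINE_PER_CPU()
 * region of some possible CPU's unit.
 */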
/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * The percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how the percpu allocator
 * actually works, and the verification can discover both bugs in the percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 *
	 * The address check is against full chunk sizes.  pcpu_base_addr
	 * points to the beginning of the first chunk including the
	 * static region.  Assumes good intent as the first chunk may
	 * not be full (ie. < pcpu_unit_pages in size).
	 */
	first_low = (unsigned long)pcpu_base_addr +
		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
	first_high = (unsigned long)pcpu_base_addr +
		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}
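/*
 * Example (illustrative): translating each CPU's copy of a dynamically
 * allocated percpu object to a physical address, e.g. for debug printing.
 * "p" is a hypothetical percpu pointer obtained from alloc_percpu().
 *
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 *
 *		pr_debug("cpu%u copy at %pa\n", cpu, &phys);
 *	}
 */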
/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(struct_size(ai, groups, nr_groups),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}
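/*
 * Example (illustrative, mirroring the UP setup at the end of this file):
 * a caller needing a single group with a single unit would do
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(1, 1);
 *
 *	if (!ai)
 *		panic("Failed to allocate memory for percpu areas.");
 *	ai->groups[0].nr_units = 1;
 *	ai->groups[0].cpu_map[0] = 0;
 *
 * Multi-group callers must also point groups[n].cpu_map (n > 0) into the
 * cpu_map array themselves, as noted above.
 */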
/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}
/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * The first chunk will always contain a static and a dynamic region.
 * However, the static region is not managed by any chunk.  If the first
 * chunk also contains a reserved region, it is served by two chunks -
 * one for the reserved region and one for the dynamic region.  They
 * share the same vm, but use offset regions in the area allocation map.
 * The chunk serving the dynamic region is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 */
void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				   void *base_addr)
{
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	size_t static_size, dyn_size;
	struct pcpu_chunk *chunk;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	unsigned int *unit_map;
	int group, unit, i;
	int map_size;
	unsigned long tmp_addr;
	size_t alloc_size;
	enum pcpu_chunk_type type;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(!ai->dyn_size);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
	/* process group information and build config tables accordingly */
	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!group_offsets)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!group_sizes)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!unit_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!unit_off)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;
	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);
	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = struct_size(chunk, populated,
					     BITS_TO_LONGS(pcpu_unit_pages));

	pcpu_stats_save_ai(ai);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
					  sizeof(pcpu_chunk_lists[0]) *
					  PCPU_NR_CHUNK_TYPES,
					  SMP_CACHE_BYTES);
	if (!pcpu_chunk_lists)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
		      PCPU_NR_CHUNK_TYPES);

	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
		for (i = 0; i < pcpu_nr_slots; i++)
			INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
	/*
	 * The end of the static region needs to be aligned with the
	 * minimum allocation size as this offsets the reserved and
	 * dynamic region.  The first chunk ends page aligned by
	 * expanding the dynamic region, therefore the dynamic region
	 * can be shrunk to compensate while still staying above the
	 * minimum allocation size.
	 */
	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
	dyn_size = ai->dyn_size - (static_size - ai->static_size);

	/*
	 * Initialize first chunk.
	 * If the reserved_size is non-zero, this initializes the reserved
	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
	 * and the dynamic region is initialized here.  The first chunk,
	 * pcpu_first_chunk, will always point to the chunk that serves
	 * the dynamic region.
	 */
	tmp_addr = (unsigned long)base_addr + static_size;
	map_size = ai->reserved_size ?: dyn_size;
	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);

	/* init dynamic chunk if necessary */
	if (ai->reserved_size) {
		pcpu_reserved_chunk = chunk;

		tmp_addr = (unsigned long)base_addr + static_size +
			   ai->reserved_size;
		map_size = dyn_size;
		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
	}

	/* link the first chunk in */
	pcpu_first_chunk = chunk;
	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* include all regions of the first chunk */
	pcpu_nr_populated += PFN_DOWN(size_sum);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(base_addr);

	/* we're done */
	pcpu_base_addr = base_addr;
}
#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
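/*
 * Example (illustrative): the first chunk allocator can be overridden on the
 * kernel command line, e.g.
 *
 *	percpu_alloc=page
 *
 * which selects pcpu_page_first_chunk() on architectures that provide the
 * page-based fallback; unknown values fall through to the warning above.
 */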
/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, best_upa;	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			     max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/* determine the maximum # of units that can fit in an allocation */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;
	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}
	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * Related to atom_size, which could be much larger than the unit_size.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;
	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}
	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group < nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc = 0;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}
	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}
	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++)
		ai->groups[group].base_offset = areas[group] - base;

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
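/*
 * Example (illustrative): an architecture without its own setup typically
 * calls the embed helper much like the generic setup_per_cpu_areas() near
 * the end of this file:
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    cpu_distance_fn, alloc_fn, free_fn);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");
 *
 * where alloc_fn/free_fn are arch-provided bootmem callbacks and
 * cpu_distance_fn may be NULL on non-NUMA configurations.
 */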
#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc = 0;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
	if (!pages)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pages_size);
	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}
	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	pcpu_setup_first_chunk(ai, fc);
	pcpu_free_alloc_info(ai);
}

#endif	/* CONFIG_SMP */
/**
 * pcpu_nr_pages - calculate total number of populated backing pages
 *
 * This reflects the number of pages populated to back chunks.  Metadata is
 * excluded in the number exposed in meminfo as the number of backing pages
 * scales with the number of cpus and can quickly outweigh the memory used for
 * metadata.  It also keeps this calculation nice and simple.
 *
 * RETURNS:
 * Total number of populated backing pages in use by the allocator.
 */
unsigned long pcpu_nr_pages(void)
{
	return pcpu_nr_populated * pcpu_nr_units;
}

/*
 * Percpu allocator is initialized early during boot when neither slab nor
 * workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);
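/*
 * Worked example for pcpu_nr_pages(): with 4 units (one per possible CPU)
 * and 30 pages currently populated per unit across all chunks,
 * pcpu_nr_populated is 30 and the function reports 30 * 4 = 120 pages,
 * which is the figure typically surfaced as the "Percpu:" line in
 * /proc/meminfo.
 */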