net: sched: set xt_tgchk_param par.nft_compat as 0 in ipt_init_target
[linux/fpc-iii.git] / mm / percpu.c
blobf014cebbf40549187a89b3375d834cfea81f96f2
1 /*
2 * mm/percpu.c - percpu memory allocator
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
7 * This file is released under the GPLv2.
9 * This is percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks. Each chunk is
11 * consisted of boot-time determined number of units and the first
12 * chunk is used for static percpu variables in the kernel image
13 * (special boot time alloc/init handling necessary as these areas
14 * need to be brought up before allocation services are running).
15 * Unit grows as necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated.
18 * c0 c1 c2
19 * ------------------- ------------------- ------------
20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 * ------------------- ...... ------------------- .... ------------
23 * Allocation is done in offset-size areas of single unit space. Ie,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25 * c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to
26 * cpus. On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
30 * There are usually many small percpu allocations many of them being
31 * as small as 4 bytes. The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator not to iterate the
36 * chunk maps unnecessarily.
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map. A positive value in the map represents a free
40 * region and negative allocated. Allocation inside a chunk is done
41 * by scanning this map sequentially and serving the first matching
42 * entry. This is mostly copied from the percpu_modalloc() allocator.
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
46 * To use this allocator, arch code should do the followings.
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be
50 * different from the default
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 * setup the first chunk containing the kernel static percpu area
56 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
58 #include <linux/bitmap.h>
59 #include <linux/bootmem.h>
60 #include <linux/err.h>
61 #include <linux/list.h>
62 #include <linux/log2.h>
63 #include <linux/mm.h>
64 #include <linux/module.h>
65 #include <linux/mutex.h>
66 #include <linux/percpu.h>
67 #include <linux/pfn.h>
68 #include <linux/slab.h>
69 #include <linux/spinlock.h>
70 #include <linux/vmalloc.h>
71 #include <linux/workqueue.h>
72 #include <linux/kmemleak.h>
74 #include <asm/cacheflush.h>
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 #include <asm/io.h>
79 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
80 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
81 #define PCPU_ATOMIC_MAP_MARGIN_LOW 32
82 #define PCPU_ATOMIC_MAP_MARGIN_HIGH 64
83 #define PCPU_EMPTY_POP_PAGES_LOW 2
84 #define PCPU_EMPTY_POP_PAGES_HIGH 4
86 #ifdef CONFIG_SMP
87 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
88 #ifndef __addr_to_pcpu_ptr
89 #define __addr_to_pcpu_ptr(addr) \
90 (void __percpu *)((unsigned long)(addr) - \
91 (unsigned long)pcpu_base_addr + \
92 (unsigned long)__per_cpu_start)
93 #endif
94 #ifndef __pcpu_ptr_to_addr
95 #define __pcpu_ptr_to_addr(ptr) \
96 (void __force *)((unsigned long)(ptr) + \
97 (unsigned long)pcpu_base_addr - \
98 (unsigned long)__per_cpu_start)
99 #endif
100 #else /* CONFIG_SMP */
101 /* on UP, it's always identity mapped */
102 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
103 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
104 #endif /* CONFIG_SMP */
106 struct pcpu_chunk {
107 struct list_head list; /* linked to pcpu_slot lists */
108 int free_size; /* free bytes in the chunk */
109 int contig_hint; /* max contiguous size hint */
110 void *base_addr; /* base address of this chunk */
112 int map_used; /* # of map entries used before the sentry */
113 int map_alloc; /* # of map entries allocated */
114 int *map; /* allocation map */
115 struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
117 void *data; /* chunk data */
118 int first_free; /* no free below this */
119 bool immutable; /* no [de]population allowed */
120 int nr_populated; /* # of populated pages */
121 unsigned long populated[]; /* populated bitmap */
124 static int pcpu_unit_pages __read_mostly;
125 static int pcpu_unit_size __read_mostly;
126 static int pcpu_nr_units __read_mostly;
127 static int pcpu_atom_size __read_mostly;
128 static int pcpu_nr_slots __read_mostly;
129 static size_t pcpu_chunk_struct_size __read_mostly;
131 /* cpus with the lowest and highest unit addresses */
132 static unsigned int pcpu_low_unit_cpu __read_mostly;
133 static unsigned int pcpu_high_unit_cpu __read_mostly;
135 /* the address of the first chunk which starts with the kernel static area */
136 void *pcpu_base_addr __read_mostly;
137 EXPORT_SYMBOL_GPL(pcpu_base_addr);
139 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
140 const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
142 /* group information, used for vm allocation */
143 static int pcpu_nr_groups __read_mostly;
144 static const unsigned long *pcpu_group_offsets __read_mostly;
145 static const size_t *pcpu_group_sizes __read_mostly;
148 * The first chunk which always exists. Note that unlike other
149 * chunks, this one can be allocated and mapped in several different
150 * ways and thus often doesn't live in the vmalloc area.
152 static struct pcpu_chunk *pcpu_first_chunk;
155 * Optional reserved chunk. This chunk reserves part of the first
156 * chunk and serves it for reserved allocations. The amount of
157 * reserved offset is in pcpu_reserved_chunk_limit. When reserved
158 * area doesn't exist, the following variables contain NULL and 0
159 * respectively.
161 static struct pcpu_chunk *pcpu_reserved_chunk;
162 static int pcpu_reserved_chunk_limit;
164 static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
165 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
167 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
169 /* chunks which need their map areas extended, protected by pcpu_lock */
170 static LIST_HEAD(pcpu_map_extend_chunks);
173 * The number of empty populated pages, protected by pcpu_lock. The
174 * reserved chunk doesn't contribute to the count.
176 static int pcpu_nr_empty_pop_pages;
179 * Balance work is used to populate or destroy chunks asynchronously. We
180 * try to keep the number of populated free pages between
181 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
182 * empty chunk.
184 static void pcpu_balance_workfn(struct work_struct *work);
185 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
186 static bool pcpu_async_enabled __read_mostly;
187 static bool pcpu_atomic_alloc_failed;
189 static void pcpu_schedule_balance_work(void)
191 if (pcpu_async_enabled)
192 schedule_work(&pcpu_balance_work);
195 static bool pcpu_addr_in_first_chunk(void *addr)
197 void *first_start = pcpu_first_chunk->base_addr;
199 return addr >= first_start && addr < first_start + pcpu_unit_size;
202 static bool pcpu_addr_in_reserved_chunk(void *addr)
204 void *first_start = pcpu_first_chunk->base_addr;
206 return addr >= first_start &&
207 addr < first_start + pcpu_reserved_chunk_limit;
210 static int __pcpu_size_to_slot(int size)
212 int highbit = fls(size); /* size is in bytes */
213 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
216 static int pcpu_size_to_slot(int size)
218 if (size == pcpu_unit_size)
219 return pcpu_nr_slots - 1;
220 return __pcpu_size_to_slot(size);
223 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
225 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
226 return 0;
228 return pcpu_size_to_slot(chunk->free_size);
231 /* set the pointer to a chunk in a page struct */
232 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
234 page->index = (unsigned long)pcpu;
237 /* obtain pointer to a chunk from a page struct */
238 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
240 return (struct pcpu_chunk *)page->index;
243 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
245 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
248 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
249 unsigned int cpu, int page_idx)
251 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
252 (page_idx << PAGE_SHIFT);
255 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
256 int *rs, int *re, int end)
258 *rs = find_next_zero_bit(chunk->populated, end, *rs);
259 *re = find_next_bit(chunk->populated, end, *rs + 1);
262 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
263 int *rs, int *re, int end)
265 *rs = find_next_bit(chunk->populated, end, *rs);
266 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
270 * (Un)populated page region iterators. Iterate over (un)populated
271 * page regions between @start and @end in @chunk. @rs and @re should
272 * be integer variables and will be set to start and end page index of
273 * the current region.
275 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
276 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
277 (rs) < (re); \
278 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
280 #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
281 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
282 (rs) < (re); \
283 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
286 * pcpu_mem_zalloc - allocate memory
287 * @size: bytes to allocate
289 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
290 * kzalloc() is used; otherwise, vzalloc() is used. The returned
291 * memory is always zeroed.
293 * CONTEXT:
294 * Does GFP_KERNEL allocation.
296 * RETURNS:
297 * Pointer to the allocated area on success, NULL on failure.
299 static void *pcpu_mem_zalloc(size_t size)
301 if (WARN_ON_ONCE(!slab_is_available()))
302 return NULL;
304 if (size <= PAGE_SIZE)
305 return kzalloc(size, GFP_KERNEL);
306 else
307 return vzalloc(size);
311 * pcpu_mem_free - free memory
312 * @ptr: memory to free
314 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
316 static void pcpu_mem_free(void *ptr)
318 kvfree(ptr);
322 * pcpu_count_occupied_pages - count the number of pages an area occupies
323 * @chunk: chunk of interest
324 * @i: index of the area in question
326 * Count the number of pages chunk's @i'th area occupies. When the area's
327 * start and/or end address isn't aligned to page boundary, the straddled
328 * page is included in the count iff the rest of the page is free.
330 static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
332 int off = chunk->map[i] & ~1;
333 int end = chunk->map[i + 1] & ~1;
335 if (!PAGE_ALIGNED(off) && i > 0) {
336 int prev = chunk->map[i - 1];
338 if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
339 off = round_down(off, PAGE_SIZE);
342 if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
343 int next = chunk->map[i + 1];
344 int nend = chunk->map[i + 2] & ~1;
346 if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
347 end = round_up(end, PAGE_SIZE);
350 return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
354 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
355 * @chunk: chunk of interest
356 * @oslot: the previous slot it was on
358 * This function is called after an allocation or free changed @chunk.
359 * New slot according to the changed state is determined and @chunk is
360 * moved to the slot. Note that the reserved chunk is never put on
361 * chunk slots.
363 * CONTEXT:
364 * pcpu_lock.
366 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
368 int nslot = pcpu_chunk_slot(chunk);
370 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
371 if (oslot < nslot)
372 list_move(&chunk->list, &pcpu_slot[nslot]);
373 else
374 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
379 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
380 * @chunk: chunk of interest
381 * @is_atomic: the allocation context
383 * Determine whether area map of @chunk needs to be extended. If
384 * @is_atomic, only the amount necessary for a new allocation is
385 * considered; however, async extension is scheduled if the left amount is
386 * low. If !@is_atomic, it aims for more empty space. Combined, this
387 * ensures that the map is likely to have enough available space to
388 * accomodate atomic allocations which can't extend maps directly.
390 * CONTEXT:
391 * pcpu_lock.
393 * RETURNS:
394 * New target map allocation length if extension is necessary, 0
395 * otherwise.
397 static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
399 int margin, new_alloc;
401 lockdep_assert_held(&pcpu_lock);
403 if (is_atomic) {
404 margin = 3;
406 if (chunk->map_alloc <
407 chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
408 if (list_empty(&chunk->map_extend_list)) {
409 list_add_tail(&chunk->map_extend_list,
410 &pcpu_map_extend_chunks);
411 pcpu_schedule_balance_work();
414 } else {
415 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
418 if (chunk->map_alloc >= chunk->map_used + margin)
419 return 0;
421 new_alloc = PCPU_DFL_MAP_ALLOC;
422 while (new_alloc < chunk->map_used + margin)
423 new_alloc *= 2;
425 return new_alloc;
429 * pcpu_extend_area_map - extend area map of a chunk
430 * @chunk: chunk of interest
431 * @new_alloc: new target allocation length of the area map
433 * Extend area map of @chunk to have @new_alloc entries.
435 * CONTEXT:
436 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
438 * RETURNS:
439 * 0 on success, -errno on failure.
441 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
443 int *old = NULL, *new = NULL;
444 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
445 unsigned long flags;
447 lockdep_assert_held(&pcpu_alloc_mutex);
449 new = pcpu_mem_zalloc(new_size);
450 if (!new)
451 return -ENOMEM;
453 /* acquire pcpu_lock and switch to new area map */
454 spin_lock_irqsave(&pcpu_lock, flags);
456 if (new_alloc <= chunk->map_alloc)
457 goto out_unlock;
459 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
460 old = chunk->map;
462 memcpy(new, old, old_size);
464 chunk->map_alloc = new_alloc;
465 chunk->map = new;
466 new = NULL;
468 out_unlock:
469 spin_unlock_irqrestore(&pcpu_lock, flags);
472 * pcpu_mem_free() might end up calling vfree() which uses
473 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
475 pcpu_mem_free(old);
476 pcpu_mem_free(new);
478 return 0;
482 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
483 * @chunk: chunk the candidate area belongs to
484 * @off: the offset to the start of the candidate area
485 * @this_size: the size of the candidate area
486 * @size: the size of the target allocation
487 * @align: the alignment of the target allocation
488 * @pop_only: only allocate from already populated region
490 * We're trying to allocate @size bytes aligned at @align. @chunk's area
491 * at @off sized @this_size is a candidate. This function determines
492 * whether the target allocation fits in the candidate area and returns the
493 * number of bytes to pad after @off. If the target area doesn't fit, -1
494 * is returned.
496 * If @pop_only is %true, this function only considers the already
497 * populated part of the candidate area.
499 static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
500 int size, int align, bool pop_only)
502 int cand_off = off;
504 while (true) {
505 int head = ALIGN(cand_off, align) - off;
506 int page_start, page_end, rs, re;
508 if (this_size < head + size)
509 return -1;
511 if (!pop_only)
512 return head;
515 * If the first unpopulated page is beyond the end of the
516 * allocation, the whole allocation is populated;
517 * otherwise, retry from the end of the unpopulated area.
519 page_start = PFN_DOWN(head + off);
520 page_end = PFN_UP(head + off + size);
522 rs = page_start;
523 pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
524 if (rs >= page_end)
525 return head;
526 cand_off = re * PAGE_SIZE;
531 * pcpu_alloc_area - allocate area from a pcpu_chunk
532 * @chunk: chunk of interest
533 * @size: wanted size in bytes
534 * @align: wanted align
535 * @pop_only: allocate only from the populated area
536 * @occ_pages_p: out param for the number of pages the area occupies
538 * Try to allocate @size bytes area aligned at @align from @chunk.
539 * Note that this function only allocates the offset. It doesn't
540 * populate or map the area.
542 * @chunk->map must have at least two free slots.
544 * CONTEXT:
545 * pcpu_lock.
547 * RETURNS:
548 * Allocated offset in @chunk on success, -1 if no matching area is
549 * found.
551 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
552 bool pop_only, int *occ_pages_p)
554 int oslot = pcpu_chunk_slot(chunk);
555 int max_contig = 0;
556 int i, off;
557 bool seen_free = false;
558 int *p;
560 for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
561 int head, tail;
562 int this_size;
564 off = *p;
565 if (off & 1)
566 continue;
568 this_size = (p[1] & ~1) - off;
570 head = pcpu_fit_in_area(chunk, off, this_size, size, align,
571 pop_only);
572 if (head < 0) {
573 if (!seen_free) {
574 chunk->first_free = i;
575 seen_free = true;
577 max_contig = max(this_size, max_contig);
578 continue;
582 * If head is small or the previous block is free,
583 * merge'em. Note that 'small' is defined as smaller
584 * than sizeof(int), which is very small but isn't too
585 * uncommon for percpu allocations.
587 if (head && (head < sizeof(int) || !(p[-1] & 1))) {
588 *p = off += head;
589 if (p[-1] & 1)
590 chunk->free_size -= head;
591 else
592 max_contig = max(*p - p[-1], max_contig);
593 this_size -= head;
594 head = 0;
597 /* if tail is small, just keep it around */
598 tail = this_size - head - size;
599 if (tail < sizeof(int)) {
600 tail = 0;
601 size = this_size - head;
604 /* split if warranted */
605 if (head || tail) {
606 int nr_extra = !!head + !!tail;
608 /* insert new subblocks */
609 memmove(p + nr_extra + 1, p + 1,
610 sizeof(chunk->map[0]) * (chunk->map_used - i));
611 chunk->map_used += nr_extra;
613 if (head) {
614 if (!seen_free) {
615 chunk->first_free = i;
616 seen_free = true;
618 *++p = off += head;
619 ++i;
620 max_contig = max(head, max_contig);
622 if (tail) {
623 p[1] = off + size;
624 max_contig = max(tail, max_contig);
628 if (!seen_free)
629 chunk->first_free = i + 1;
631 /* update hint and mark allocated */
632 if (i + 1 == chunk->map_used)
633 chunk->contig_hint = max_contig; /* fully scanned */
634 else
635 chunk->contig_hint = max(chunk->contig_hint,
636 max_contig);
638 chunk->free_size -= size;
639 *p |= 1;
641 *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
642 pcpu_chunk_relocate(chunk, oslot);
643 return off;
646 chunk->contig_hint = max_contig; /* fully scanned */
647 pcpu_chunk_relocate(chunk, oslot);
649 /* tell the upper layer that this chunk has no matching area */
650 return -1;
654 * pcpu_free_area - free area to a pcpu_chunk
655 * @chunk: chunk of interest
656 * @freeme: offset of area to free
657 * @occ_pages_p: out param for the number of pages the area occupies
659 * Free area starting from @freeme to @chunk. Note that this function
660 * only modifies the allocation map. It doesn't depopulate or unmap
661 * the area.
663 * CONTEXT:
664 * pcpu_lock.
666 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
667 int *occ_pages_p)
669 int oslot = pcpu_chunk_slot(chunk);
670 int off = 0;
671 unsigned i, j;
672 int to_free = 0;
673 int *p;
675 freeme |= 1; /* we are searching for <given offset, in use> pair */
677 i = 0;
678 j = chunk->map_used;
679 while (i != j) {
680 unsigned k = (i + j) / 2;
681 off = chunk->map[k];
682 if (off < freeme)
683 i = k + 1;
684 else if (off > freeme)
685 j = k;
686 else
687 i = j = k;
689 BUG_ON(off != freeme);
691 if (i < chunk->first_free)
692 chunk->first_free = i;
694 p = chunk->map + i;
695 *p = off &= ~1;
696 chunk->free_size += (p[1] & ~1) - off;
698 *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
700 /* merge with next? */
701 if (!(p[1] & 1))
702 to_free++;
703 /* merge with previous? */
704 if (i > 0 && !(p[-1] & 1)) {
705 to_free++;
706 i--;
707 p--;
709 if (to_free) {
710 chunk->map_used -= to_free;
711 memmove(p + 1, p + 1 + to_free,
712 (chunk->map_used - i) * sizeof(chunk->map[0]));
715 chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
716 pcpu_chunk_relocate(chunk, oslot);
719 static struct pcpu_chunk *pcpu_alloc_chunk(void)
721 struct pcpu_chunk *chunk;
723 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
724 if (!chunk)
725 return NULL;
727 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
728 sizeof(chunk->map[0]));
729 if (!chunk->map) {
730 pcpu_mem_free(chunk);
731 return NULL;
734 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
735 chunk->map[0] = 0;
736 chunk->map[1] = pcpu_unit_size | 1;
737 chunk->map_used = 1;
739 INIT_LIST_HEAD(&chunk->list);
740 INIT_LIST_HEAD(&chunk->map_extend_list);
741 chunk->free_size = pcpu_unit_size;
742 chunk->contig_hint = pcpu_unit_size;
744 return chunk;
747 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
749 if (!chunk)
750 return;
751 pcpu_mem_free(chunk->map);
752 pcpu_mem_free(chunk);
756 * pcpu_chunk_populated - post-population bookkeeping
757 * @chunk: pcpu_chunk which got populated
758 * @page_start: the start page
759 * @page_end: the end page
761 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
762 * the bookkeeping information accordingly. Must be called after each
763 * successful population.
765 static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
766 int page_start, int page_end)
768 int nr = page_end - page_start;
770 lockdep_assert_held(&pcpu_lock);
772 bitmap_set(chunk->populated, page_start, nr);
773 chunk->nr_populated += nr;
774 pcpu_nr_empty_pop_pages += nr;
778 * pcpu_chunk_depopulated - post-depopulation bookkeeping
779 * @chunk: pcpu_chunk which got depopulated
780 * @page_start: the start page
781 * @page_end: the end page
783 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
784 * Update the bookkeeping information accordingly. Must be called after
785 * each successful depopulation.
787 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
788 int page_start, int page_end)
790 int nr = page_end - page_start;
792 lockdep_assert_held(&pcpu_lock);
794 bitmap_clear(chunk->populated, page_start, nr);
795 chunk->nr_populated -= nr;
796 pcpu_nr_empty_pop_pages -= nr;
800 * Chunk management implementation.
802 * To allow different implementations, chunk alloc/free and
803 * [de]population are implemented in a separate file which is pulled
804 * into this file and compiled together. The following functions
805 * should be implemented.
807 * pcpu_populate_chunk - populate the specified range of a chunk
808 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
809 * pcpu_create_chunk - create a new chunk
810 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
811 * pcpu_addr_to_page - translate address to physical address
812 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
814 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
815 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
816 static struct pcpu_chunk *pcpu_create_chunk(void);
817 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
818 static struct page *pcpu_addr_to_page(void *addr);
819 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
821 #ifdef CONFIG_NEED_PER_CPU_KM
822 #include "percpu-km.c"
823 #else
824 #include "percpu-vm.c"
825 #endif
828 * pcpu_chunk_addr_search - determine chunk containing specified address
829 * @addr: address for which the chunk needs to be determined.
831 * RETURNS:
832 * The address of the found chunk.
834 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
836 /* is it in the first chunk? */
837 if (pcpu_addr_in_first_chunk(addr)) {
838 /* is it in the reserved area? */
839 if (pcpu_addr_in_reserved_chunk(addr))
840 return pcpu_reserved_chunk;
841 return pcpu_first_chunk;
845 * The address is relative to unit0 which might be unused and
846 * thus unmapped. Offset the address to the unit space of the
847 * current processor before looking it up in the vmalloc
848 * space. Note that any possible cpu id can be used here, so
849 * there's no need to worry about preemption or cpu hotplug.
851 addr += pcpu_unit_offsets[raw_smp_processor_id()];
852 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
856 * pcpu_alloc - the percpu allocator
857 * @size: size of area to allocate in bytes
858 * @align: alignment of area (max PAGE_SIZE)
859 * @reserved: allocate from the reserved chunk if available
860 * @gfp: allocation flags
862 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
863 * contain %GFP_KERNEL, the allocation is atomic.
865 * RETURNS:
866 * Percpu pointer to the allocated area on success, NULL on failure.
868 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
869 gfp_t gfp)
871 static int warn_limit = 10;
872 struct pcpu_chunk *chunk;
873 const char *err;
874 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
875 int occ_pages = 0;
876 int slot, off, new_alloc, cpu, ret;
877 unsigned long flags;
878 void __percpu *ptr;
881 * We want the lowest bit of offset available for in-use/free
882 * indicator, so force >= 16bit alignment and make size even.
884 if (unlikely(align < 2))
885 align = 2;
887 size = ALIGN(size, 2);
889 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
890 WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
891 size, align);
892 return NULL;
895 if (!is_atomic)
896 mutex_lock(&pcpu_alloc_mutex);
898 spin_lock_irqsave(&pcpu_lock, flags);
900 /* serve reserved allocations from the reserved chunk if available */
901 if (reserved && pcpu_reserved_chunk) {
902 chunk = pcpu_reserved_chunk;
904 if (size > chunk->contig_hint) {
905 err = "alloc from reserved chunk failed";
906 goto fail_unlock;
909 while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
910 spin_unlock_irqrestore(&pcpu_lock, flags);
911 if (is_atomic ||
912 pcpu_extend_area_map(chunk, new_alloc) < 0) {
913 err = "failed to extend area map of reserved chunk";
914 goto fail;
916 spin_lock_irqsave(&pcpu_lock, flags);
919 off = pcpu_alloc_area(chunk, size, align, is_atomic,
920 &occ_pages);
921 if (off >= 0)
922 goto area_found;
924 err = "alloc from reserved chunk failed";
925 goto fail_unlock;
928 restart:
929 /* search through normal chunks */
930 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
931 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
932 if (size > chunk->contig_hint)
933 continue;
935 new_alloc = pcpu_need_to_extend(chunk, is_atomic);
936 if (new_alloc) {
937 if (is_atomic)
938 continue;
939 spin_unlock_irqrestore(&pcpu_lock, flags);
940 if (pcpu_extend_area_map(chunk,
941 new_alloc) < 0) {
942 err = "failed to extend area map";
943 goto fail;
945 spin_lock_irqsave(&pcpu_lock, flags);
947 * pcpu_lock has been dropped, need to
948 * restart cpu_slot list walking.
950 goto restart;
953 off = pcpu_alloc_area(chunk, size, align, is_atomic,
954 &occ_pages);
955 if (off >= 0)
956 goto area_found;
960 spin_unlock_irqrestore(&pcpu_lock, flags);
963 * No space left. Create a new chunk. We don't want multiple
964 * tasks to create chunks simultaneously. Serialize and create iff
965 * there's still no empty chunk after grabbing the mutex.
967 if (is_atomic)
968 goto fail;
970 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
971 chunk = pcpu_create_chunk();
972 if (!chunk) {
973 err = "failed to allocate new chunk";
974 goto fail;
977 spin_lock_irqsave(&pcpu_lock, flags);
978 pcpu_chunk_relocate(chunk, -1);
979 } else {
980 spin_lock_irqsave(&pcpu_lock, flags);
983 goto restart;
985 area_found:
986 spin_unlock_irqrestore(&pcpu_lock, flags);
988 /* populate if not all pages are already there */
989 if (!is_atomic) {
990 int page_start, page_end, rs, re;
992 page_start = PFN_DOWN(off);
993 page_end = PFN_UP(off + size);
995 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
996 WARN_ON(chunk->immutable);
998 ret = pcpu_populate_chunk(chunk, rs, re);
1000 spin_lock_irqsave(&pcpu_lock, flags);
1001 if (ret) {
1002 pcpu_free_area(chunk, off, &occ_pages);
1003 err = "failed to populate";
1004 goto fail_unlock;
1006 pcpu_chunk_populated(chunk, rs, re);
1007 spin_unlock_irqrestore(&pcpu_lock, flags);
1010 mutex_unlock(&pcpu_alloc_mutex);
1013 if (chunk != pcpu_reserved_chunk) {
1014 spin_lock_irqsave(&pcpu_lock, flags);
1015 pcpu_nr_empty_pop_pages -= occ_pages;
1016 spin_unlock_irqrestore(&pcpu_lock, flags);
1019 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1020 pcpu_schedule_balance_work();
1022 /* clear the areas and return address relative to base address */
1023 for_each_possible_cpu(cpu)
1024 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1026 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1027 kmemleak_alloc_percpu(ptr, size, gfp);
1028 return ptr;
1030 fail_unlock:
1031 spin_unlock_irqrestore(&pcpu_lock, flags);
1032 fail:
1033 if (!is_atomic && warn_limit) {
1034 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1035 size, align, is_atomic, err);
1036 dump_stack();
1037 if (!--warn_limit)
1038 pr_info("limit reached, disable warning\n");
1040 if (is_atomic) {
1041 /* see the flag handling in pcpu_blance_workfn() */
1042 pcpu_atomic_alloc_failed = true;
1043 pcpu_schedule_balance_work();
1044 } else {
1045 mutex_unlock(&pcpu_alloc_mutex);
1047 return NULL;
1051 * __alloc_percpu_gfp - allocate dynamic percpu area
1052 * @size: size of area to allocate in bytes
1053 * @align: alignment of area (max PAGE_SIZE)
1054 * @gfp: allocation flags
1056 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1057 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1058 * be called from any context but is a lot more likely to fail.
1060 * RETURNS:
1061 * Percpu pointer to the allocated area on success, NULL on failure.
1063 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1065 return pcpu_alloc(size, align, false, gfp);
1067 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1070 * __alloc_percpu - allocate dynamic percpu area
1071 * @size: size of area to allocate in bytes
1072 * @align: alignment of area (max PAGE_SIZE)
1074 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1076 void __percpu *__alloc_percpu(size_t size, size_t align)
1078 return pcpu_alloc(size, align, false, GFP_KERNEL);
1080 EXPORT_SYMBOL_GPL(__alloc_percpu);
1083 * __alloc_reserved_percpu - allocate reserved percpu area
1084 * @size: size of area to allocate in bytes
1085 * @align: alignment of area (max PAGE_SIZE)
1087 * Allocate zero-filled percpu area of @size bytes aligned at @align
1088 * from reserved percpu area if arch has set it up; otherwise,
1089 * allocation is served from the same dynamic area. Might sleep.
1090 * Might trigger writeouts.
1092 * CONTEXT:
1093 * Does GFP_KERNEL allocation.
1095 * RETURNS:
1096 * Percpu pointer to the allocated area on success, NULL on failure.
1098 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1100 return pcpu_alloc(size, align, true, GFP_KERNEL);
1104 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1105 * @work: unused
1107 * Reclaim all fully free chunks except for the first one.
1109 static void pcpu_balance_workfn(struct work_struct *work)
1111 LIST_HEAD(to_free);
1112 struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1113 struct pcpu_chunk *chunk, *next;
1114 int slot, nr_to_pop, ret;
1117 * There's no reason to keep around multiple unused chunks and VM
1118 * areas can be scarce. Destroy all free chunks except for one.
1120 mutex_lock(&pcpu_alloc_mutex);
1121 spin_lock_irq(&pcpu_lock);
1123 list_for_each_entry_safe(chunk, next, free_head, list) {
1124 WARN_ON(chunk->immutable);
1126 /* spare the first one */
1127 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1128 continue;
1130 list_del_init(&chunk->map_extend_list);
1131 list_move(&chunk->list, &to_free);
1134 spin_unlock_irq(&pcpu_lock);
1136 list_for_each_entry_safe(chunk, next, &to_free, list) {
1137 int rs, re;
1139 pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1140 pcpu_depopulate_chunk(chunk, rs, re);
1141 spin_lock_irq(&pcpu_lock);
1142 pcpu_chunk_depopulated(chunk, rs, re);
1143 spin_unlock_irq(&pcpu_lock);
1145 pcpu_destroy_chunk(chunk);
1148 /* service chunks which requested async area map extension */
1149 do {
1150 int new_alloc = 0;
1152 spin_lock_irq(&pcpu_lock);
1154 chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
1155 struct pcpu_chunk, map_extend_list);
1156 if (chunk) {
1157 list_del_init(&chunk->map_extend_list);
1158 new_alloc = pcpu_need_to_extend(chunk, false);
1161 spin_unlock_irq(&pcpu_lock);
1163 if (new_alloc)
1164 pcpu_extend_area_map(chunk, new_alloc);
1165 } while (chunk);
1168 * Ensure there are certain number of free populated pages for
1169 * atomic allocs. Fill up from the most packed so that atomic
1170 * allocs don't increase fragmentation. If atomic allocation
1171 * failed previously, always populate the maximum amount. This
1172 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1173 * failing indefinitely; however, large atomic allocs are not
1174 * something we support properly and can be highly unreliable and
1175 * inefficient.
1177 retry_pop:
1178 if (pcpu_atomic_alloc_failed) {
1179 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1180 /* best effort anyway, don't worry about synchronization */
1181 pcpu_atomic_alloc_failed = false;
1182 } else {
1183 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1184 pcpu_nr_empty_pop_pages,
1185 0, PCPU_EMPTY_POP_PAGES_HIGH);
1188 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1189 int nr_unpop = 0, rs, re;
1191 if (!nr_to_pop)
1192 break;
1194 spin_lock_irq(&pcpu_lock);
1195 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1196 nr_unpop = pcpu_unit_pages - chunk->nr_populated;
1197 if (nr_unpop)
1198 break;
1200 spin_unlock_irq(&pcpu_lock);
1202 if (!nr_unpop)
1203 continue;
1205 /* @chunk can't go away while pcpu_alloc_mutex is held */
1206 pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1207 int nr = min(re - rs, nr_to_pop);
1209 ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1210 if (!ret) {
1211 nr_to_pop -= nr;
1212 spin_lock_irq(&pcpu_lock);
1213 pcpu_chunk_populated(chunk, rs, rs + nr);
1214 spin_unlock_irq(&pcpu_lock);
1215 } else {
1216 nr_to_pop = 0;
1219 if (!nr_to_pop)
1220 break;
1224 if (nr_to_pop) {
1225 /* ran out of chunks to populate, create a new one and retry */
1226 chunk = pcpu_create_chunk();
1227 if (chunk) {
1228 spin_lock_irq(&pcpu_lock);
1229 pcpu_chunk_relocate(chunk, -1);
1230 spin_unlock_irq(&pcpu_lock);
1231 goto retry_pop;
1235 mutex_unlock(&pcpu_alloc_mutex);
1239 * free_percpu - free percpu area
1240 * @ptr: pointer to area to free
1242 * Free percpu area @ptr.
1244 * CONTEXT:
1245 * Can be called from atomic context.
1247 void free_percpu(void __percpu *ptr)
1249 void *addr;
1250 struct pcpu_chunk *chunk;
1251 unsigned long flags;
1252 int off, occ_pages;
1254 if (!ptr)
1255 return;
1257 kmemleak_free_percpu(ptr);
1259 addr = __pcpu_ptr_to_addr(ptr);
1261 spin_lock_irqsave(&pcpu_lock, flags);
1263 chunk = pcpu_chunk_addr_search(addr);
1264 off = addr - chunk->base_addr;
1266 pcpu_free_area(chunk, off, &occ_pages);
1268 if (chunk != pcpu_reserved_chunk)
1269 pcpu_nr_empty_pop_pages += occ_pages;
1271 /* if there are more than one fully free chunks, wake up grim reaper */
1272 if (chunk->free_size == pcpu_unit_size) {
1273 struct pcpu_chunk *pos;
1275 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1276 if (pos != chunk) {
1277 pcpu_schedule_balance_work();
1278 break;
1282 spin_unlock_irqrestore(&pcpu_lock, flags);
1284 EXPORT_SYMBOL_GPL(free_percpu);
1287 * is_kernel_percpu_address - test whether address is from static percpu area
1288 * @addr: address to test
1290 * Test whether @addr belongs to in-kernel static percpu area. Module
1291 * static percpu areas are not considered. For those, use
1292 * is_module_percpu_address().
1294 * RETURNS:
1295 * %true if @addr is from in-kernel static percpu area, %false otherwise.
1297 bool is_kernel_percpu_address(unsigned long addr)
1299 #ifdef CONFIG_SMP
1300 const size_t static_size = __per_cpu_end - __per_cpu_start;
1301 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1302 unsigned int cpu;
1304 for_each_possible_cpu(cpu) {
1305 void *start = per_cpu_ptr(base, cpu);
1307 if ((void *)addr >= start && (void *)addr < start + static_size)
1308 return true;
1310 #endif
1311 /* on UP, can't distinguish from other static vars, always false */
1312 return false;
1316 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1317 * @addr: the address to be converted to physical address
1319 * Given @addr which is dereferenceable address obtained via one of
1320 * percpu access macros, this function translates it into its physical
1321 * address. The caller is responsible for ensuring @addr stays valid
1322 * until this function finishes.
1324 * percpu allocator has special setup for the first chunk, which currently
1325 * supports either embedding in linear address space or vmalloc mapping,
1326 * and, from the second one, the backing allocator (currently either vm or
1327 * km) provides translation.
1329 * The addr can be translated simply without checking if it falls into the
1330 * first chunk. But the current code reflects better how percpu allocator
1331 * actually works, and the verification can discover both bugs in percpu
1332 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
1333 * code.
1335 * RETURNS:
1336 * The physical address for @addr.
1338 phys_addr_t per_cpu_ptr_to_phys(void *addr)
1340 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1341 bool in_first_chunk = false;
1342 unsigned long first_low, first_high;
1343 unsigned int cpu;
1346 * The following test on unit_low/high isn't strictly
1347 * necessary but will speed up lookups of addresses which
1348 * aren't in the first chunk.
1350 first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1351 first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1352 pcpu_unit_pages);
1353 if ((unsigned long)addr >= first_low &&
1354 (unsigned long)addr < first_high) {
1355 for_each_possible_cpu(cpu) {
1356 void *start = per_cpu_ptr(base, cpu);
1358 if (addr >= start && addr < start + pcpu_unit_size) {
1359 in_first_chunk = true;
1360 break;
1365 if (in_first_chunk) {
1366 if (!is_vmalloc_addr(addr))
1367 return __pa(addr);
1368 else
1369 return page_to_phys(vmalloc_to_page(addr)) +
1370 offset_in_page(addr);
1371 } else
1372 return page_to_phys(pcpu_addr_to_page(addr)) +
1373 offset_in_page(addr);
1377 * pcpu_alloc_alloc_info - allocate percpu allocation info
1378 * @nr_groups: the number of groups
1379 * @nr_units: the number of units
1381 * Allocate ai which is large enough for @nr_groups groups containing
1382 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1383 * cpu_map array which is long enough for @nr_units and filled with
1384 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1385 * pointer of other groups.
1387 * RETURNS:
1388 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1389 * failure.
1391 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1392 int nr_units)
1394 struct pcpu_alloc_info *ai;
1395 size_t base_size, ai_size;
1396 void *ptr;
1397 int unit;
1399 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1400 __alignof__(ai->groups[0].cpu_map[0]));
1401 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1403 ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1404 if (!ptr)
1405 return NULL;
1406 ai = ptr;
1407 ptr += base_size;
1409 ai->groups[0].cpu_map = ptr;
1411 for (unit = 0; unit < nr_units; unit++)
1412 ai->groups[0].cpu_map[unit] = NR_CPUS;
1414 ai->nr_groups = nr_groups;
1415 ai->__ai_size = PFN_ALIGN(ai_size);
1417 return ai;
1421 * pcpu_free_alloc_info - free percpu allocation info
1422 * @ai: pcpu_alloc_info to free
1424 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1426 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1428 memblock_free_early(__pa(ai), ai->__ai_size);
1432 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1433 * @lvl: loglevel
1434 * @ai: allocation info to dump
1436 * Print out information about @ai using loglevel @lvl.
1438 static void pcpu_dump_alloc_info(const char *lvl,
1439 const struct pcpu_alloc_info *ai)
1441 int group_width = 1, cpu_width = 1, width;
1442 char empty_str[] = "--------";
1443 int alloc = 0, alloc_end = 0;
1444 int group, v;
1445 int upa, apl; /* units per alloc, allocs per line */
1447 v = ai->nr_groups;
1448 while (v /= 10)
1449 group_width++;
1451 v = num_possible_cpus();
1452 while (v /= 10)
1453 cpu_width++;
1454 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1456 upa = ai->alloc_size / ai->unit_size;
1457 width = upa * (cpu_width + 1) + group_width + 3;
1458 apl = rounddown_pow_of_two(max(60 / width, 1));
1460 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1461 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1462 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1464 for (group = 0; group < ai->nr_groups; group++) {
1465 const struct pcpu_group_info *gi = &ai->groups[group];
1466 int unit = 0, unit_end = 0;
1468 BUG_ON(gi->nr_units % upa);
1469 for (alloc_end += gi->nr_units / upa;
1470 alloc < alloc_end; alloc++) {
1471 if (!(alloc % apl)) {
1472 pr_cont("\n");
1473 printk("%spcpu-alloc: ", lvl);
1475 pr_cont("[%0*d] ", group_width, group);
1477 for (unit_end += upa; unit < unit_end; unit++)
1478 if (gi->cpu_map[unit] != NR_CPUS)
1479 pr_cont("%0*d ",
1480 cpu_width, gi->cpu_map[unit]);
1481 else
1482 pr_cont("%s ", empty_str);
1485 pr_cont("\n");
1489 * pcpu_setup_first_chunk - initialize the first percpu chunk
1490 * @ai: pcpu_alloc_info describing how to percpu area is shaped
1491 * @base_addr: mapped address
1493 * Initialize the first percpu chunk which contains the kernel static
1494 * perpcu area. This function is to be called from arch percpu area
1495 * setup path.
1497 * @ai contains all information necessary to initialize the first
1498 * chunk and prime the dynamic percpu allocator.
1500 * @ai->static_size is the size of static percpu area.
1502 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1503 * reserve after the static area in the first chunk. This reserves
1504 * the first chunk such that it's available only through reserved
1505 * percpu allocation. This is primarily used to serve module percpu
1506 * static areas on architectures where the addressing model has
1507 * limited offset range for symbol relocations to guarantee module
1508 * percpu symbols fall inside the relocatable range.
1510 * @ai->dyn_size determines the number of bytes available for dynamic
1511 * allocation in the first chunk. The area between @ai->static_size +
1512 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1514 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1515 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1516 * @ai->dyn_size.
1518 * @ai->atom_size is the allocation atom size and used as alignment
1519 * for vm areas.
1521 * @ai->alloc_size is the allocation size and always multiple of
1522 * @ai->atom_size. This is larger than @ai->atom_size if
1523 * @ai->unit_size is larger than @ai->atom_size.
1525 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1526 * percpu areas. Units which should be colocated are put into the
1527 * same group. Dynamic VM areas will be allocated according to these
1528 * groupings. If @ai->nr_groups is zero, a single group containing
1529 * all units is assumed.
1531 * The caller should have mapped the first chunk at @base_addr and
1532 * copied static data to each unit.
1534 * If the first chunk ends up with both reserved and dynamic areas, it
1535 * is served by two chunks - one to serve the core static and reserved
1536 * areas and the other for the dynamic area. They share the same vm
1537 * and page map but uses different area allocation map to stay away
1538 * from each other. The latter chunk is circulated in the chunk slots
1539 * and available for dynamic allocation like any other chunks.
1541 * RETURNS:
1542 * 0 on success, -errno on failure.
1544 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1545 void *base_addr)
1547 static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1548 static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1549 size_t dyn_size = ai->dyn_size;
1550 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1551 struct pcpu_chunk *schunk, *dchunk = NULL;
1552 unsigned long *group_offsets;
1553 size_t *group_sizes;
1554 unsigned long *unit_off;
1555 unsigned int cpu;
1556 int *unit_map;
1557 int group, unit, i;
1559 #define PCPU_SETUP_BUG_ON(cond) do { \
1560 if (unlikely(cond)) { \
1561 pr_emerg("failed to initialize, %s\n", #cond); \
1562 pr_emerg("cpu_possible_mask=%*pb\n", \
1563 cpumask_pr_args(cpu_possible_mask)); \
1564 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1565 BUG(); \
1567 } while (0)
1569 /* sanity checks */
1570 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1571 #ifdef CONFIG_SMP
1572 PCPU_SETUP_BUG_ON(!ai->static_size);
1573 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
1574 #endif
1575 PCPU_SETUP_BUG_ON(!base_addr);
1576 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
1577 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1578 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
1579 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1580 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1581 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1583 /* process group information and build config tables accordingly */
1584 group_offsets = memblock_virt_alloc(ai->nr_groups *
1585 sizeof(group_offsets[0]), 0);
1586 group_sizes = memblock_virt_alloc(ai->nr_groups *
1587 sizeof(group_sizes[0]), 0);
1588 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1589 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1591 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1592 unit_map[cpu] = UINT_MAX;
1594 pcpu_low_unit_cpu = NR_CPUS;
1595 pcpu_high_unit_cpu = NR_CPUS;
1597 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1598 const struct pcpu_group_info *gi = &ai->groups[group];
1600 group_offsets[group] = gi->base_offset;
1601 group_sizes[group] = gi->nr_units * ai->unit_size;
1603 for (i = 0; i < gi->nr_units; i++) {
1604 cpu = gi->cpu_map[i];
1605 if (cpu == NR_CPUS)
1606 continue;
1608 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1609 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1610 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1612 unit_map[cpu] = unit + i;
1613 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1615 /* determine low/high unit_cpu */
1616 if (pcpu_low_unit_cpu == NR_CPUS ||
1617 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1618 pcpu_low_unit_cpu = cpu;
1619 if (pcpu_high_unit_cpu == NR_CPUS ||
1620 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1621 pcpu_high_unit_cpu = cpu;
1624 pcpu_nr_units = unit;
1626 for_each_possible_cpu(cpu)
1627 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1629 /* we're done parsing the input, undefine BUG macro and dump config */
1630 #undef PCPU_SETUP_BUG_ON
1631 pcpu_dump_alloc_info(KERN_DEBUG, ai);
1633 pcpu_nr_groups = ai->nr_groups;
1634 pcpu_group_offsets = group_offsets;
1635 pcpu_group_sizes = group_sizes;
1636 pcpu_unit_map = unit_map;
1637 pcpu_unit_offsets = unit_off;
1639 /* determine basic parameters */
1640 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1641 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1642 pcpu_atom_size = ai->atom_size;
1643 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1644 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1647 * Allocate chunk slots. The additional last slot is for
1648 * empty chunks.
1650 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1651 pcpu_slot = memblock_virt_alloc(
1652 pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1653 for (i = 0; i < pcpu_nr_slots; i++)
1654 INIT_LIST_HEAD(&pcpu_slot[i]);
1657 * Initialize static chunk. If reserved_size is zero, the
1658 * static chunk covers static area + dynamic allocation area
1659 * in the first chunk. If reserved_size is not zero, it
1660 * covers static area + reserved area (mostly used for module
1661 * static percpu allocation).
1663 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1664 INIT_LIST_HEAD(&schunk->list);
1665 INIT_LIST_HEAD(&schunk->map_extend_list);
1666 schunk->base_addr = base_addr;
1667 schunk->map = smap;
1668 schunk->map_alloc = ARRAY_SIZE(smap);
1669 schunk->immutable = true;
1670 bitmap_fill(schunk->populated, pcpu_unit_pages);
1671 schunk->nr_populated = pcpu_unit_pages;
1673 if (ai->reserved_size) {
1674 schunk->free_size = ai->reserved_size;
1675 pcpu_reserved_chunk = schunk;
1676 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1677 } else {
1678 schunk->free_size = dyn_size;
1679 dyn_size = 0; /* dynamic area covered */
1681 schunk->contig_hint = schunk->free_size;
1683 schunk->map[0] = 1;
1684 schunk->map[1] = ai->static_size;
1685 schunk->map_used = 1;
1686 if (schunk->free_size)
1687 schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
1688 schunk->map[schunk->map_used] |= 1;
1690 /* init dynamic chunk if necessary */
1691 if (dyn_size) {
1692 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1693 INIT_LIST_HEAD(&dchunk->list);
1694 INIT_LIST_HEAD(&dchunk->map_extend_list);
1695 dchunk->base_addr = base_addr;
1696 dchunk->map = dmap;
1697 dchunk->map_alloc = ARRAY_SIZE(dmap);
1698 dchunk->immutable = true;
1699 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1700 dchunk->nr_populated = pcpu_unit_pages;
1702 dchunk->contig_hint = dchunk->free_size = dyn_size;
1703 dchunk->map[0] = 1;
1704 dchunk->map[1] = pcpu_reserved_chunk_limit;
1705 dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1706 dchunk->map_used = 2;
1709 /* link the first chunk in */
1710 pcpu_first_chunk = dchunk ?: schunk;
1711 pcpu_nr_empty_pop_pages +=
1712 pcpu_count_occupied_pages(pcpu_first_chunk, 1);
1713 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1715 /* we're done */
1716 pcpu_base_addr = base_addr;
1717 return 0;
1720 #ifdef CONFIG_SMP
1722 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1723 [PCPU_FC_AUTO] = "auto",
1724 [PCPU_FC_EMBED] = "embed",
1725 [PCPU_FC_PAGE] = "page",
1728 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1730 static int __init percpu_alloc_setup(char *str)
1732 if (!str)
1733 return -EINVAL;
1735 if (0)
1736 /* nada */;
1737 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1738 else if (!strcmp(str, "embed"))
1739 pcpu_chosen_fc = PCPU_FC_EMBED;
1740 #endif
1741 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1742 else if (!strcmp(str, "page"))
1743 pcpu_chosen_fc = PCPU_FC_PAGE;
1744 #endif
1745 else
1746 pr_warn("unknown allocator %s specified\n", str);
1748 return 0;
1750 early_param("percpu_alloc", percpu_alloc_setup);
1753 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1754 * Build it if needed by the arch config or the generic setup is going
1755 * to be used.
1757 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1758 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1759 #define BUILD_EMBED_FIRST_CHUNK
1760 #endif
1762 /* build pcpu_page_first_chunk() iff needed by the arch config */
1763 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1764 #define BUILD_PAGE_FIRST_CHUNK
1765 #endif
1767 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
1768 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1770 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1771 * @reserved_size: the size of reserved percpu area in bytes
1772 * @dyn_size: minimum free size for dynamic allocation in bytes
1773 * @atom_size: allocation atom size
1774 * @cpu_distance_fn: callback to determine distance between cpus, optional
1776 * This function determines grouping of units, their mappings to cpus
1777 * and other parameters considering needed percpu size, allocation
1778 * atom size and distances between CPUs.
1780 * Groups are always multiples of atom size and CPUs which are of
1781 * LOCAL_DISTANCE both ways are grouped together and share space for
1782 * units in the same group. The returned configuration is guaranteed
1783 * to have CPUs on different nodes on different groups and >=75% usage
1784 * of allocated virtual address space.
1786 * RETURNS:
1787 * On success, pointer to the new allocation_info is returned. On
1788 * failure, ERR_PTR value is returned.
1790 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1791 size_t reserved_size, size_t dyn_size,
1792 size_t atom_size,
1793 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1795 static int group_map[NR_CPUS] __initdata;
1796 static int group_cnt[NR_CPUS] __initdata;
1797 const size_t static_size = __per_cpu_end - __per_cpu_start;
1798 int nr_groups = 1, nr_units = 0;
1799 size_t size_sum, min_unit_size, alloc_size;
1800 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1801 int last_allocs, group, unit;
1802 unsigned int cpu, tcpu;
1803 struct pcpu_alloc_info *ai;
1804 unsigned int *cpu_map;
1806 /* this function may be called multiple times */
1807 memset(group_map, 0, sizeof(group_map));
1808 memset(group_cnt, 0, sizeof(group_cnt));
1810 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1811 size_sum = PFN_ALIGN(static_size + reserved_size +
1812 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1813 dyn_size = size_sum - static_size - reserved_size;
1816 * Determine min_unit_size, alloc_size and max_upa such that
1817 * alloc_size is multiple of atom_size and is the smallest
1818 * which can accommodate 4k aligned segments which are equal to
1819 * or larger than min_unit_size.
1821 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1823 alloc_size = roundup(min_unit_size, atom_size);
1824 upa = alloc_size / min_unit_size;
1825 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1826 upa--;
1827 max_upa = upa;
1829 /* group cpus according to their proximity */
1830 for_each_possible_cpu(cpu) {
1831 group = 0;
1832 next_group:
1833 for_each_possible_cpu(tcpu) {
1834 if (cpu == tcpu)
1835 break;
1836 if (group_map[tcpu] == group && cpu_distance_fn &&
1837 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1838 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1839 group++;
1840 nr_groups = max(nr_groups, group + 1);
1841 goto next_group;
1844 group_map[cpu] = group;
1845 group_cnt[group]++;
1849 * Expand unit size until address space usage goes over 75%
1850 * and then as much as possible without using more address
1851 * space.
1853 last_allocs = INT_MAX;
1854 for (upa = max_upa; upa; upa--) {
1855 int allocs = 0, wasted = 0;
1857 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1858 continue;
1860 for (group = 0; group < nr_groups; group++) {
1861 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1862 allocs += this_allocs;
1863 wasted += this_allocs * upa - group_cnt[group];
1867 * Don't accept if wastage is over 1/3. The
1868 * greater-than comparison ensures upa==1 always
1869 * passes the following check.
1871 if (wasted > num_possible_cpus() / 3)
1872 continue;
1874 /* and then don't consume more memory */
1875 if (allocs > last_allocs)
1876 break;
1877 last_allocs = allocs;
1878 best_upa = upa;
1880 upa = best_upa;
1882 /* allocate and fill alloc_info */
1883 for (group = 0; group < nr_groups; group++)
1884 nr_units += roundup(group_cnt[group], upa);
1886 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1887 if (!ai)
1888 return ERR_PTR(-ENOMEM);
1889 cpu_map = ai->groups[0].cpu_map;
1891 for (group = 0; group < nr_groups; group++) {
1892 ai->groups[group].cpu_map = cpu_map;
1893 cpu_map += roundup(group_cnt[group], upa);
1896 ai->static_size = static_size;
1897 ai->reserved_size = reserved_size;
1898 ai->dyn_size = dyn_size;
1899 ai->unit_size = alloc_size / upa;
1900 ai->atom_size = atom_size;
1901 ai->alloc_size = alloc_size;
1903 for (group = 0, unit = 0; group_cnt[group]; group++) {
1904 struct pcpu_group_info *gi = &ai->groups[group];
1907 * Initialize base_offset as if all groups are located
1908 * back-to-back. The caller should update this to
1909 * reflect actual allocation.
1911 gi->base_offset = unit * ai->unit_size;
1913 for_each_possible_cpu(cpu)
1914 if (group_map[cpu] == group)
1915 gi->cpu_map[gi->nr_units++] = cpu;
1916 gi->nr_units = roundup(gi->nr_units, upa);
1917 unit += gi->nr_units;
1919 BUG_ON(unit != nr_units);
1921 return ai;
1923 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1925 #if defined(BUILD_EMBED_FIRST_CHUNK)
1927 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1928 * @reserved_size: the size of reserved percpu area in bytes
1929 * @dyn_size: minimum free size for dynamic allocation in bytes
1930 * @atom_size: allocation atom size
1931 * @cpu_distance_fn: callback to determine distance between cpus, optional
1932 * @alloc_fn: function to allocate percpu page
1933 * @free_fn: function to free percpu page
1935 * This is a helper to ease setting up embedded first percpu chunk and
1936 * can be called where pcpu_setup_first_chunk() is expected.
1938 * If this function is used to setup the first chunk, it is allocated
1939 * by calling @alloc_fn and used as-is without being mapped into
1940 * vmalloc area. Allocations are always whole multiples of @atom_size
1941 * aligned to @atom_size.
1943 * This enables the first chunk to piggy back on the linear physical
1944 * mapping which often uses larger page size. Please note that this
1945 * can result in very sparse cpu->unit mapping on NUMA machines thus
1946 * requiring large vmalloc address space. Don't use this allocator if
1947 * vmalloc space is not orders of magnitude larger than distances
1948 * between node memory addresses (ie. 32bit NUMA machines).
1950 * @dyn_size specifies the minimum dynamic area size.
1952 * If the needed size is smaller than the minimum or specified unit
1953 * size, the leftover is returned using @free_fn.
1955 * RETURNS:
1956 * 0 on success, -errno on failure.
1958 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1959 size_t atom_size,
1960 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1961 pcpu_fc_alloc_fn_t alloc_fn,
1962 pcpu_fc_free_fn_t free_fn)
1964 void *base = (void *)ULONG_MAX;
1965 void **areas = NULL;
1966 struct pcpu_alloc_info *ai;
1967 size_t size_sum, areas_size;
1968 unsigned long max_distance;
1969 int group, i, highest_group, rc;
1971 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1972 cpu_distance_fn);
1973 if (IS_ERR(ai))
1974 return PTR_ERR(ai);
1976 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1977 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1979 areas = memblock_virt_alloc_nopanic(areas_size, 0);
1980 if (!areas) {
1981 rc = -ENOMEM;
1982 goto out_free;
1985 /* allocate, copy and determine base address & max_distance */
1986 highest_group = 0;
1987 for (group = 0; group < ai->nr_groups; group++) {
1988 struct pcpu_group_info *gi = &ai->groups[group];
1989 unsigned int cpu = NR_CPUS;
1990 void *ptr;
1992 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1993 cpu = gi->cpu_map[i];
1994 BUG_ON(cpu == NR_CPUS);
1996 /* allocate space for the whole group */
1997 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1998 if (!ptr) {
1999 rc = -ENOMEM;
2000 goto out_free_areas;
2002 /* kmemleak tracks the percpu allocations separately */
2003 kmemleak_free(ptr);
2004 areas[group] = ptr;
2006 base = min(ptr, base);
2007 if (ptr > areas[highest_group])
2008 highest_group = group;
2010 max_distance = areas[highest_group] - base;
2011 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2013 /* warn if maximum distance is further than 75% of vmalloc space */
2014 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2015 pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2016 max_distance, VMALLOC_TOTAL);
2017 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2018 /* and fail if we have fallback */
2019 rc = -EINVAL;
2020 goto out_free_areas;
2021 #endif
2025 * Copy data and free unused parts. This should happen after all
2026 * allocations are complete; otherwise, we may end up with
2027 * overlapping groups.
2029 for (group = 0; group < ai->nr_groups; group++) {
2030 struct pcpu_group_info *gi = &ai->groups[group];
2031 void *ptr = areas[group];
2033 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2034 if (gi->cpu_map[i] == NR_CPUS) {
2035 /* unused unit, free whole */
2036 free_fn(ptr, ai->unit_size);
2037 continue;
2039 /* copy and return the unused part */
2040 memcpy(ptr, __per_cpu_load, ai->static_size);
2041 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2045 /* base address is now known, determine group base offsets */
2046 for (group = 0; group < ai->nr_groups; group++) {
2047 ai->groups[group].base_offset = areas[group] - base;
2050 pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2051 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2052 ai->dyn_size, ai->unit_size);
2054 rc = pcpu_setup_first_chunk(ai, base);
2055 goto out_free;
2057 out_free_areas:
2058 for (group = 0; group < ai->nr_groups; group++)
2059 if (areas[group])
2060 free_fn(areas[group],
2061 ai->groups[group].nr_units * ai->unit_size);
2062 out_free:
2063 pcpu_free_alloc_info(ai);
2064 if (areas)
2065 memblock_free_early(__pa(areas), areas_size);
2066 return rc;
2068 #endif /* BUILD_EMBED_FIRST_CHUNK */
2070 #ifdef BUILD_PAGE_FIRST_CHUNK
2072 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2073 * @reserved_size: the size of reserved percpu area in bytes
2074 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2075 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2076 * @populate_pte_fn: function to populate pte
2078 * This is a helper to ease setting up page-remapped first percpu
2079 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2081 * This is the basic allocator. Static percpu area is allocated
2082 * page-by-page into vmalloc area.
2084 * RETURNS:
2085 * 0 on success, -errno on failure.
2087 int __init pcpu_page_first_chunk(size_t reserved_size,
2088 pcpu_fc_alloc_fn_t alloc_fn,
2089 pcpu_fc_free_fn_t free_fn,
2090 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2092 static struct vm_struct vm;
2093 struct pcpu_alloc_info *ai;
2094 char psize_str[16];
2095 int unit_pages;
2096 size_t pages_size;
2097 struct page **pages;
2098 int unit, i, j, rc;
2100 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2102 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2103 if (IS_ERR(ai))
2104 return PTR_ERR(ai);
2105 BUG_ON(ai->nr_groups != 1);
2106 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
2108 unit_pages = ai->unit_size >> PAGE_SHIFT;
2110 /* unaligned allocations can't be freed, round up to page size */
2111 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2112 sizeof(pages[0]));
2113 pages = memblock_virt_alloc(pages_size, 0);
2115 /* allocate pages */
2116 j = 0;
2117 for (unit = 0; unit < num_possible_cpus(); unit++)
2118 for (i = 0; i < unit_pages; i++) {
2119 unsigned int cpu = ai->groups[0].cpu_map[unit];
2120 void *ptr;
2122 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2123 if (!ptr) {
2124 pr_warn("failed to allocate %s page for cpu%u\n",
2125 psize_str, cpu);
2126 goto enomem;
2128 /* kmemleak tracks the percpu allocations separately */
2129 kmemleak_free(ptr);
2130 pages[j++] = virt_to_page(ptr);
2133 /* allocate vm area, map the pages and copy static data */
2134 vm.flags = VM_ALLOC;
2135 vm.size = num_possible_cpus() * ai->unit_size;
2136 vm_area_register_early(&vm, PAGE_SIZE);
2138 for (unit = 0; unit < num_possible_cpus(); unit++) {
2139 unsigned long unit_addr =
2140 (unsigned long)vm.addr + unit * ai->unit_size;
2142 for (i = 0; i < unit_pages; i++)
2143 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2145 /* pte already populated, the following shouldn't fail */
2146 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2147 unit_pages);
2148 if (rc < 0)
2149 panic("failed to map percpu area, err=%d\n", rc);
2152 * FIXME: Archs with virtual cache should flush local
2153 * cache for the linear mapping here - something
2154 * equivalent to flush_cache_vmap() on the local cpu.
2155 * flush_cache_vmap() can't be used as most supporting
2156 * data structures are not set up yet.
2159 /* copy static data */
2160 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2163 /* we're ready, commit */
2164 pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2165 unit_pages, psize_str, vm.addr, ai->static_size,
2166 ai->reserved_size, ai->dyn_size);
2168 rc = pcpu_setup_first_chunk(ai, vm.addr);
2169 goto out_free_ar;
2171 enomem:
2172 while (--j >= 0)
2173 free_fn(page_address(pages[j]), PAGE_SIZE);
2174 rc = -ENOMEM;
2175 out_free_ar:
2176 memblock_free_early(__pa(pages), pages_size);
2177 pcpu_free_alloc_info(ai);
2178 return rc;
2180 #endif /* BUILD_PAGE_FIRST_CHUNK */
2182 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2184 * Generic SMP percpu area setup.
2186 * The embedding helper is used because its behavior closely resembles
2187 * the original non-dynamic generic percpu area setup. This is
2188 * important because many archs have addressing restrictions and might
2189 * fail if the percpu area is located far away from the previous
2190 * location. As an added bonus, in non-NUMA cases, embedding is
2191 * generally a good idea TLB-wise because percpu area can piggy back
2192 * on the physical linear memory mapping which uses large page
2193 * mappings on applicable archs.
2195 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2196 EXPORT_SYMBOL(__per_cpu_offset);
2198 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2199 size_t align)
2201 return memblock_virt_alloc_from_nopanic(
2202 size, align, __pa(MAX_DMA_ADDRESS));
2205 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2207 memblock_free_early(__pa(ptr), size);
2210 void __init setup_per_cpu_areas(void)
2212 unsigned long delta;
2213 unsigned int cpu;
2214 int rc;
2217 * Always reserve area for module percpu variables. That's
2218 * what the legacy allocator did.
2220 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2221 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2222 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2223 if (rc < 0)
2224 panic("Failed to initialize percpu areas.");
2226 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2227 for_each_possible_cpu(cpu)
2228 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2230 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2232 #else /* CONFIG_SMP */
2235 * UP percpu area setup.
2237 * UP always uses km-based percpu allocator with identity mapping.
2238 * Static percpu variables are indistinguishable from the usual static
2239 * variables and don't require any special preparation.
2241 void __init setup_per_cpu_areas(void)
2243 const size_t unit_size =
2244 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2245 PERCPU_DYNAMIC_RESERVE));
2246 struct pcpu_alloc_info *ai;
2247 void *fc;
2249 ai = pcpu_alloc_alloc_info(1, 1);
2250 fc = memblock_virt_alloc_from_nopanic(unit_size,
2251 PAGE_SIZE,
2252 __pa(MAX_DMA_ADDRESS));
2253 if (!ai || !fc)
2254 panic("Failed to allocate memory for percpu areas.");
2255 /* kmemleak tracks the percpu allocations separately */
2256 kmemleak_free(fc);
2258 ai->dyn_size = unit_size;
2259 ai->unit_size = unit_size;
2260 ai->atom_size = unit_size;
2261 ai->alloc_size = unit_size;
2262 ai->groups[0].nr_units = 1;
2263 ai->groups[0].cpu_map[0] = 0;
2265 if (pcpu_setup_first_chunk(ai, fc) < 0)
2266 panic("Failed to initialize percpu areas.");
2269 #endif /* CONFIG_SMP */
2272 * First and reserved chunks are initialized with temporary allocation
2273 * map in initdata so that they can be used before slab is online.
2274 * This function is called after slab is brought up and replaces those
2275 * with properly allocated maps.
2277 void __init percpu_init_late(void)
2279 struct pcpu_chunk *target_chunks[] =
2280 { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2281 struct pcpu_chunk *chunk;
2282 unsigned long flags;
2283 int i;
2285 for (i = 0; (chunk = target_chunks[i]); i++) {
2286 int *map;
2287 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2289 BUILD_BUG_ON(size > PAGE_SIZE);
2291 map = pcpu_mem_zalloc(size);
2292 BUG_ON(!map);
2294 spin_lock_irqsave(&pcpu_lock, flags);
2295 memcpy(map, chunk->map, size);
2296 chunk->map = map;
2297 spin_unlock_irqrestore(&pcpu_lock, flags);
2302 * Percpu allocator is initialized early during boot when neither slab or
2303 * workqueue is available. Plug async management until everything is up
2304 * and running.
2306 static int __init percpu_enable_async(void)
2308 pcpu_async_enabled = true;
2309 return 0;
2311 subsys_initcall(percpu_enable_async);