/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
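/*
 * Illustrative note, not part of the original file: pcpu_chunk_addr() is
 * provided by mm/percpu.c and composes the lookup used above roughly as
 *
 *	addr = (unsigned long)chunk->base_addr +
 *	       pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
 *
 * so pcpu_chunk_page() simply walks from that vmalloc address back to its
 * backing struct page.  The exact helper differs between kernel versions;
 * treat this as a sketch rather than the authoritative definition.
 */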
/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, 0);
	return pages;
}
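/*
 * Illustrative sketch, not part of the original file: the temp array keeps
 * one slot per (unit, unit page) pair and is indexed with pcpu_page_idx(),
 * which in mm/percpu.c is approximately
 *
 *	pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx
 *
 * so a caller, holding pcpu_alloc_mutex, uses it along these lines:
 *
 *	struct page **pages = pcpu_get_pages();
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	pages[pcpu_page_idx(cpu, page_idx)] = page;
 *
 * The precise pcpu_page_idx() definition is version dependent; this is a
 * sketch only.
 */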
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}
/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

	gfp |= GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
	/* roll back the pages already allocated for the failing cpu ... */
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	/* ... and the full ranges of all cpus handled before it */
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}
/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}
/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			/* stash the page so pcpu_free_pages() can free it */
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;

err:
	/* undo the mappings established for the cpus handled so far */
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}
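/*
 * Illustrative note, not part of the original file: the "reverse lookup"
 * mentioned above works by stashing the owning chunk in the page itself.
 * In mm/percpu.c the accessors look roughly like
 *
 *	static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 *	{
 *		page->index = (unsigned long)pcpu;
 *	}
 *
 *	static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 *	{
 *		return (struct pcpu_chunk *)page->index;
 *	}
 *
 * letting the percpu core go from an address to its chunk through
 * pcpu_addr_to_page() below.  The exact field and helpers vary across
 * kernel versions; treat this as a sketch.
 */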
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the flush is done at once for the
 * whole region rather than per cpu.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}
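/*
 * Usage sketch, illustrative and not part of the original file: the percpu
 * core invokes this with a page range derived from a byte region of the
 * chunk, conceptually something like
 *
 *	int page_start = PFN_DOWN(off);
 *	int page_end   = PFN_UP(off + size);
 *
 *	ret = pcpu_populate_chunk(chunk, page_start, page_end, GFP_KERNEL);
 *
 * where off and size describe the allocation being satisfied.  The real
 * call sites live in mm/percpu.c and differ between kernel versions.
 */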
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}
static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}