percpu: introduce pcpu_alloc_info and pcpu_group_info
arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
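/*
 * On x86_64 the percpu output section is linked zero-based, so
 * boot-time percpu accesses must be offset by __per_cpu_load to land
 * in the initial percpu copy in the kernel image.  On x86_32 percpu
 * variables are linked at their final image addresses, so an offset
 * of 0 already works.
 */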
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif
DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
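/*
 * Every CPU's offset starts out as BOOT_PERCPU_OFFSET so that percpu
 * accessors work on the boot CPU before setup_per_cpu_areas() rewrites
 * this table to point at the real, allocated areas.
 */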
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}
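/*
 * These two adapt the bootmem allocator to the alloc/free callback
 * signatures (pcpu_fc_alloc_fn_t / pcpu_fc_free_fn_t, per the percpu
 * API of this kernel) expected by the generic first chunk helpers in
 * mm/percpu.c.
 */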
/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
	pmd_t *pmd, pmd_v;

	pmd = populate_extra_pmd((unsigned long)addr);
	pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
	set_pmd(pmd, pmd_v);
}
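/*
 * Distance callback for pcpu_build_alloc_info(): CPUs on the same
 * NUMA node are treated as local and grouped together; everything
 * else is remote, so each node's units end up in their own group.
 */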
static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
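/*
 * For reference, an abridged sketch of the structures this patch adds
 * to include/linux/percpu.h (see that header for the authoritative
 * definitions and field comments):
 *
 *	struct pcpu_group_info {
 *		int		nr_units;	// aligned # of units
 *		unsigned long	base_offset;	// base address offset
 *		unsigned int	*cpu_map;	// unit -> cpu map
 *	};
 *
 *	struct pcpu_alloc_info {
 *		size_t		static_size;
 *		size_t		reserved_size;
 *		size_t		dyn_size;
 *		size_t		unit_size;
 *		size_t		atom_size;
 *		int		nr_groups;
 *		struct pcpu_group_info groups[];
 *	};
 *
 * setup_pcpu_lpage() below obtains one of these from
 * pcpu_build_alloc_info() and walks the groups to size the chunk.
 */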
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
	struct pcpu_alloc_info *ai;
	ssize_t ret;

	/* on non-NUMA, embedding is better */
	if (!chosen && !pcpu_need_numa())
		return -EINVAL;

	/* need PSE */
	if (!cpu_has_pse) {
		pr_warning("PERCPU: lpage allocator requires PSE\n");
		return -EINVAL;
	}

	/* allocate and build unit_map */
	ai = pcpu_build_alloc_info(PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				   PMD_SIZE, pcpu_lpage_cpu_distance);
	if (IS_ERR(ai)) {
		pr_warning("PERCPU: failed to build unit_map (%ld)\n",
			   PTR_ERR(ai));
		return PTR_ERR(ai);
	}

	/* do the parameters look okay? */
	if (!chosen) {
		size_t vm_size = VMALLOC_END - VMALLOC_START;
		size_t tot_size = 0;
		int group;

		for (group = 0; group < ai->nr_groups; group++)
			tot_size += ai->unit_size * ai->groups[group].nr_units;

		/* don't consume more than 20% of vmalloc area */
		if (tot_size > vm_size / 5) {
			pr_info("PERCPU: too large chunk size %zuMB for large page remap\n",
				tot_size >> 20);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = pcpu_lpage_first_chunk(ai, pcpu_fc_alloc, pcpu_fc_free,
				     pcpul_map);
out_free:
	pcpu_free_alloc_info(ai);
	return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
	return -EINVAL;
}
#endif
/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and is embedded into the linear
 * physical mapping so that it can use PMD mapping without additional
 * TLB pressure.
 */
static ssize_t __init setup_pcpu_embed(bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
		return -EINVAL;

	return pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}
/*
 * Page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more
 * pressure on PTE TLBs but other than that behaves nicely on both UMA
 * and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(void)
{
	return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
				     pcpu_fc_alloc, pcpu_fc_free,
				     pcpup_populate_pte);
}
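/*
 * On x86_32 the percpu base lives in a GDT segment (%fs), so each CPU
 * gets a descriptor whose base is its percpu offset; the 0xFFFFF limit
 * with page granularity (the 0x8 flag) covers the full 4GB.  x86_64
 * instead loads the percpu base into the GS base MSR, so there is
 * nothing to do here.
 */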
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
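/*
 * First chunk allocator selection: pcpu_chosen_fc defaults to
 * PCPU_FC_AUTO and can be overridden with the percpu_alloc= boot
 * parameter (handled in mm/percpu.c).  Auto mode tries lpage, then
 * embed, and finally the page allocator; an explicit choice is
 * honored but still falls back to page size on failure.
 */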
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
				ret = setup_pcpu_lpage(true);
			else
				ret = setup_pcpu_embed(true);

			if (ret < 0)
				pr_warning("PERCPU: %s allocator failed (%zd), falling back to page size\n",
					   pcpu_fc_names[pcpu_chosen_fc], ret);
		}
	} else {
		ret = setup_pcpu_lpage(false);
		if (ret < 0)
			ret = setup_pcpu_embed(false);
	}
	if (ret < 0)
		ret = setup_pcpu_page();
	if (ret < 0)
		panic("cannot initialize percpu area (err=%zd)", ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) =
			delta + pcpu_unit_map[cpu] * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .data.init area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is right in case the
	 * boot cpu is on a node that has no memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}
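/*
 * A sketch of how the offsets installed above are consumed (not part
 * of this file): per_cpu(var, cpu) effectively computes
 *
 *	*(typeof(var) *)((char *)&var + __per_cpu_offset[cpu])
 *
 * while the this_cpu_off percpu variable gives the running CPU its own
 * offset for %fs/%gs-relative accesses.
 */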