#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

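/*
 * Note: x86_64 percpu symbols are zero-based, so until
 * setup_per_cpu_areas() runs the boot-time offset must point at the
 * initial percpu area linked at __per_cpu_load; x86_32 uses normal
 * symbol addresses and needs no boot-time offset.
 */
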
DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

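/*
 * For example, a module's DEFINE_PER_CPU(int, foo) is resolved
 * through a 32bit relocation against the percpu segment base.
 * Serving module statics from the first chunk's module reserve
 * keeps every such offset within 32bit reach on x86_64.
 */
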
/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		/* two distinct online nodes seen - NUMA matters */
		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment of the allocation
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		/* no usable node - fall back to any-node allocation */
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

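/*
 * Note: the @goal of __pa(MAX_DMA_ADDRESS) asks the bootmem
 * allocator to place the area above the DMA zone when possible,
 * keeping scarce low memory free for devices that need it.
 */
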
/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

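/*
 * These helpers match the pcpu_fc_alloc_fn_t and pcpu_fc_free_fn_t
 * callback signatures expected by the pcpu_*_first_chunk() setup
 * functions in mm/percpu.c.
 */
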
/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
	pmd_t *pmd, pmd_v;

	pmd = populate_extra_pmd((unsigned long)addr);
	pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
	set_pmd(pmd, pmd_v);
}

static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

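/*
 * LOCAL_DISTANCE and REMOTE_DISTANCE are the generic node distance
 * values from <linux/topology.h>; pcpu_build_alloc_info() uses this
 * callback to group CPUs of the same node into the same allocation
 * group.
 */
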
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
	struct pcpu_alloc_info *ai;
	ssize_t ret;

	/* on non-NUMA, embedding is better */
	if (!chosen && !pcpu_need_numa())
		return -EINVAL;

	/* need PSE */
	if (!cpu_has_pse) {
		pr_warning("PERCPU: lpage allocator requires PSE\n");
		return -EINVAL;
	}

	/* allocate and build unit_map */
	ai = pcpu_build_alloc_info(PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				   PMD_SIZE, pcpu_lpage_cpu_distance);
	if (IS_ERR(ai)) {
		pr_warning("PERCPU: failed to build unit_map (%ld)\n",
			   PTR_ERR(ai));
		return PTR_ERR(ai);
	}

	/* do the parameters look okay? */
	if (!chosen) {
		size_t vm_size = VMALLOC_END - VMALLOC_START;
		size_t tot_size = 0;
		int group;

		for (group = 0; group < ai->nr_groups; group++)
			tot_size += ai->unit_size * ai->groups[group].nr_units;

		/* don't consume more than 20% of vmalloc area */
		if (tot_size > vm_size / 5) {
			pr_info("PERCPU: too large chunk size %zuMB for "
				"large page remap\n", tot_size >> 20);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = pcpu_lpage_first_chunk(ai, pcpu_fc_alloc, pcpu_fc_free,
				     pcpul_map);
out_free:
	pcpu_free_alloc_info(ai);
	return ret;
}
#else	/* !CONFIG_NEED_MULTIPLE_NODES */
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into the linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
		return -EINVAL;

	return pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}

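/*
 * Note: pcpu_embed_first_chunk() carves the first chunk out of the
 * kernel's linear mapping, which is why it inherits large page
 * mappings for free but cannot place each CPU's unit on its own
 * NUMA node.
 */
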
/*
 * Page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more
 * pressure on PTE TLBs but other than that behaves nicely on both UMA
 * and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(void)
{
	return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
				     pcpu_fc_alloc, pcpu_fc_free,
				     pcpup_populate_pte);
}

/* install the GDT segment whose base is this cpu's percpu offset (32-bit only) */
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

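/*
 * Note: on x86_32 percpu accesses go through the segment installed
 * above (loaded as __KERNEL_PERCPU); x86_64 instead loads the percpu
 * base into the GS base MSR (see switch_to_new_gdt()), so this
 * function compiles to a no-op there.
 */
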
void __init setup_per_cpu_areas(void)
{
	size_t pcpu_unit_size;
	unsigned long delta;
	unsigned int cpu;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
				ret = setup_pcpu_lpage(true);
			else
				ret = setup_pcpu_embed(true);

			if (ret < 0)
				pr_warning("PERCPU: %s allocator failed (%zd), "
					   "falling back to page size\n",
					   pcpu_fc_names[pcpu_chosen_fc], ret);
		}
	} else {
		ret = setup_pcpu_lpage(false);
		if (ret < 0)
			ret = setup_pcpu_embed(false);
	}
	if (ret < 0)
		ret = setup_pcpu_page();
	if (ret < 0)
		panic("cannot initialize percpu area (err=%zd)", ret);

	pcpu_unit_size = ret;

311 pcpu_unit_size
= ret
;
313 /* alrighty, percpu areas up and running */
314 delta
= (unsigned long)pcpu_base_addr
- (unsigned long)__per_cpu_start
;
315 for_each_possible_cpu(cpu
) {
316 per_cpu_offset(cpu
) =
317 delta
+ pcpu_unit_map
[cpu
] * pcpu_unit_size
;
318 per_cpu(this_cpu_off
, cpu
) = per_cpu_offset(cpu
);
319 per_cpu(cpu_number
, cpu
) = cpu
;
320 setup_percpu_segment(cpu
);
321 setup_stack_canary_segment(cpu
);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is right even when the
	 * boot cpu is on a node that has no memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}