/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
	unsigned long num_dma_physpages;
#endif
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
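/*
 * Illustrative example (not from the original source, assuming a 64KB
 * PERCPU_PAGE_SIZE): NODEDATA_ALIGN(0x4080000, 3) first rounds the address
 * up to the next 1MB boundary (0x4100000) and then adds the node stride
 * 3 * 0x10000 = 0x30000, giving 0x4130000.  Different nodes therefore land
 * at different offsets within the 32MB MAX_NODE_ALIGN_OFFSET window, which
 * is what avoids the cache aliasing mentioned above.
 */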
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;
	struct bootmem_data *bdp = &bootmem_node_data[node];

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!bdp->node_low_pfn) {
		bdp->node_min_pfn = spfn;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	return 0;
}
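/*
 * Worked example (illustrative, assuming a 16MB IA64_GRANULE_SIZE and 16KB
 * pages): a range of 0x1234000-0x5678000 is widened to the enclosing
 * granules 0x1000000-0x6000000, i.e. spfn = 0x400 and epfn = 0x1800.  The
 * pages between the real range and the granule boundaries simply become
 * holes covered by the virtual memmap, as the comment above describes.
 */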
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);

	return pernodesize;
}
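/*
 * Rough sketch of the sizing above (illustrative numbers only): a node with
 * 4 early cpus and a 64KB PERCPU_PAGE_SIZE needs 4 * 64KB of per-cpu space,
 * a node-dependent L1_CACHE_BYTES stride, one cache-aligned pg_data_t, one
 * cache-aligned ia64_node_data, plus a second pg_data_t-sized pad, all
 * rounded up to a whole page by PAGE_ALIGN().
 */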
/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		if (cpu == 0) {
			void *cpu0_data = __cpu0_per_cpu;
			__per_cpu_offset[cpu] = (char*)cpu0_data -
				__per_cpu_start;
		} else if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &bootmem_node_data[node];

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	pgdat_list[node]->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &bootmem_node_data[node];

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - bdp->node_min_pfn;
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}
/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(pgdat_list[node], start, len);

	return 0;
}
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = pgdat_list[node];

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - bdp->node_min_pfn;
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
	}
}
static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() were used, a new node's
	 * pg_data_ptrs would not be initialized.  Instead, pgdat_list[]
	 * is checked.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}
/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node fails.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}
/**
 * memory_less_nodes - allocate and initialize pernode data for all
 *	CPU-only (memoryless) nodes.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}
}
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (bootmem_node_data[node].node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}

	efi_memmap_walk(filter_memory, register_active_ranges);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &bootmem_node_data[node];

		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(pgdat_list[node],
				  map >> PAGE_SHIFT,
				  bdp->node_min_pfn,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;
}
#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) =
				__per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */
/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
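/*
 * Illustrative example (hypothetical SRAT layout, not from the original
 * source): with two memblks, node 0 covering 0x00000000-0x40000000 and
 * node 1 covering 0x40000000-0x80000000, a walked range of
 * 0x30000000-0x50000000 is split into two callbacks:
 * (*func)(0x30000000, 0x10000000, 0) and (*func)(0x40000000, 0x10000000, 1).
 */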
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
#endif
	start = GRANULEROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
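/*
 * Illustrative example (hypothetical values): with __pa(MAX_DMA_ADDRESS) at
 * 4GB and 16KB pages, a 2GB range starting at 0x80000000 contributes
 * 0x20000 pages to num_physpages and, since it lies entirely below 4GB,
 * the same 0x20000 pages to num_dma_physpages.
 */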
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		num_physpages += mem_data[node].num_physpages;
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;

	scatter_node_data();
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)
{
	return vmemmap_populate_basepages(start_page, size, node);
}
#endif