/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)

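/*
 * For illustration, assuming a hypothetical 64KB PERCPU_PAGE_SIZE:
 * NODEDATA_ALIGN(0x4080000, 2) rounds 0x4080000 up to the next 1MB boundary
 * (0x4100000) and then adds 2 * 0x10000, giving 0x4120000.  Consecutive
 * nodes therefore start at addresses offset by a per-node stride, so their
 * per-node structures do not alias in the caches.
 */
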
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}

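/*
 * For illustration, assuming the default 16MB granule: a range
 * [0x4080000, 0x5000000) is widened to the granule-aligned block
 * [0x4000000, 0x5000000); the pages added by the rounding have no backing
 * memory and are covered only by the virtual memmap.
 */
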
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __init early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __init compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

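/*
 * Example breakdown, for a node with 4 cpus and a hypothetical 64KB
 * PERCPU_PAGE_SIZE: 4 * 64KB for the per-cpu areas, plus
 * node * L1_CACHE_BYTES of anti-aliasing pad, plus one cache-aligned
 * pg_data_t and one cache-aligned ia64_node_data, all rounded up to a
 * page boundary.
 */
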
/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}

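/*
 * Note: after the copy above, __per_cpu_offset[cpu] holds the delta that
 * cpu must add to a per-cpu symbol's link-time address to reach its private
 * copy inside this node's pernode area; per_cpu_init() below mirrors these
 * offsets into local_per_cpu_offset.
 */
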
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	mem_data[node].pgdat = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	mem_data[node].pgdat->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = mem_data[node].pgdat;

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	pg_data_t *pgdat_list[MAX_NUMNODES];
	int cpu, node;

	for_each_online_node(node)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for_each_online_node(node) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node but fall back to any other node when __alloc_bootmem_node fails
 *	for best.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 * @align: alignment to use for this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
	unsigned long align)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
	}

	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
		pernodesize, align, __pa(MAX_DMA_ADDRESS));

	if (!ptr)
		panic("NO memory for memory less node\n");
	return ptr;
}

/**
 * pgdat_insert - insert the pgdat into global pgdat_list
 * @pgdat: the pgdat for a node.
 */
static void __init pgdat_insert(pg_data_t *pgdat)
{
	pg_data_t *prev = NULL, *next;

	for_each_pgdat(next)
		if (pgdat->node_id < next->node_id)
			break;
		else
			prev = next;

	if (prev) {
		prev->pgdat_next = pgdat;
		pgdat->pgdat_next = next;
	} else {
		pgdat->pgdat_next = pgdat_list;
		pgdat_list = pgdat;
	}

	return;
}

/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize,
			(node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}
	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() != 0)
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long present = pgdat->node_present_pages;
		int shared = 0, cached = 0, reserved = 0;

		printk("Node ID: %d\n", pgdat->node_id);
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);

			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
				continue;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n",
	       pgtable_quicklist_total_size());
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}

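/*
 * For illustration (addresses hypothetical): if an EFI range covers
 * [2GB, 6GB) but the SRAT says node 0 owns [0, 4GB) and node 1 owns
 * [4GB, 8GB), the callback is invoked twice: once with (2GB, 2GB, 0) and
 * once with (4GB, 2GB, 1).
 */
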
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);

	for_each_online_node(node) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}

		pfn_offset = mem_data[node].min_pfn;

		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}

	/*
	 * Make memory less nodes become a member of the known nodes.
	 */
	for_each_node_mask(node, memory_less_mask)
		pgdat_insert(mem_data[node].pgdat);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}