/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: unused callback data
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

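	/*
	 * Walk the page-aligned gaps between the reserved regions
	 * (rsvd_region[] is sorted by start address).  Returning -1
	 * once a large enough range is found stops the EFI memmap
	 * walk early.
	 */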
	for (i = 0; i < num_rsvd_regions; i++) {
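		/*
		 * Clip the candidate range to this EFI region; the
		 * reserved region's start is rounded down to a page
		 * boundary so a partially reserved page is never
		 * handed out.
		 */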
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue;	/* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}

	return 0;
}

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't have to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

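		/*
		 * Copy the static percpu template for this CPU, then
		 * record its offset both in the global
		 * __per_cpu_offset[] array and in the CPU's own
		 * local_per_cpu_offset variable.
		 */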
		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas which is important for congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
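	/*
	 * One PERCPU_PAGE_SIZE area per possible CPU, suitably
	 * aligned; __pa(MAX_DMA_ADDRESS) is the bootmem "goal",
	 * steering the allocation above the DMA region.
	 */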
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

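	/*
	 * Each unit is a full PERCPU_PAGE_SIZE area, so the first
	 * chunk's layout matches what per_cpu_init() already copied
	 * into place.
	 */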
	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;
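
	/*
	 * The first chunk's base address is cpu0's area, i.e. where
	 * the static percpu copies made by per_cpu_init() begin.
	 */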
	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
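	/* (the bootmem map is a bitmap: one bit per page frame,
	 *  rounded up to whole pages) */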

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
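
	/* contiguous memory: everything is rooted at node 0 */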
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
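	/*
	 * With no large hole in the physical address space, a plain
	 * linear mem_map is cheapest; otherwise map the struct pages
	 * virtually so the holes cost no memory.
	 */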
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
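
		/*
		 * Carve vmem_map off the top of the vmalloc area;
		 * create_mem_map_page_table() will map struct pages
		 * only for ranges that hold actual memory.
		 */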
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}