[PATCH] don't use flush_tlb_all in suspend time
[linux-2.6/next.git] mm/sparse.c
blob e0a3fe48aa3745bebd710ff80d8cad6215dd244d

/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
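
/*
 * Illustrative sketch, not part of the original file: how a section
 * number is looked up in the table above.  This mirrors
 * __nr_to_section() from linux/mmzone.h; in the !EXTREME case
 * SECTIONS_PER_ROOT is 1, so NR_SECTION_ROOTS == NR_MEM_SECTIONS and
 * the second index is always 0.
 */
#if 0
static struct mem_section *example_nr_to_section(unsigned long nr)
{
	/* the first level may be unpopulated until sparse_index_init() runs */
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
#endif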

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int sparse_index_init(unsigned long section_nr, int nid)
{
	static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
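
/*
 * Illustrative sketch, not part of the original file: __section_nr()
 * is the inverse of __nr_to_section(), so a valid section number
 * survives the round trip.
 */
#if 0
static void example_section_nr_roundtrip(unsigned long nr)
{
	struct mem_section *ms = __nr_to_section(nr);

	if (ms)
		BUG_ON(__section_nr(ms) != nr);
}
#endif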

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
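
/*
 * Illustrative sketch, not part of the original file: the early-NID
 * round trip.  This assumes the flag bits (e.g. SECTION_MARKED_PRESENT)
 * sit below SECTION_NID_SHIFT, so the shift in sparse_early_nid()
 * discards them.
 */
#if 0
static void example_early_nid_roundtrip(int nid)
{
	struct mem_section tmp;

	tmp.section_mem_map = sparse_encode_early_nid(nid) |
					SECTION_MARKED_PRESENT;
	BUG_ON(sparse_early_nid(&tmp) != nid);
}
#endif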

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
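
/*
 * Illustrative sketch, not part of the original file: a typical
 * boot-time caller walks the firmware memory map and records each
 * range against its node before calling sparse_init().  The pfn
 * bounds here are hypothetical.
 */
#if 0
static void __init example_register_present_memory(void)
{
	memory_present(0, 0x00000, 0x40000);	/* node 0: first 1GB (4k pages) */
	memory_present(1, 0x40000, 0x80000);	/* node 1: second 1GB */
}
#endif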

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_valid(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
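
/*
 * Worked example, not part of the original file: with 4k pages and
 * hypothetical 128MB sections (PAGES_PER_SECTION == 32768), a node
 * owning two valid sections within [start_pfn, end_pfn) reports
 * 2 * 32768 * sizeof(struct page) bytes of mem_map.
 */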

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
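
/*
 * Illustrative sketch, not part of the original file: what the
 * encoding above buys.  The stored value is mem_map biased down by
 * the section's first pfn, so any pfn in the section indexes it
 * directly, with no per-lookup subtraction.
 */
#if 0
static void example_coded_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded = sparse_encode_mem_map(mem_map, pnum);
	unsigned long first_pfn = section_nr_to_pfn(pnum);

	/* decoding recovers the original map pointer ... */
	BUG_ON(sparse_decode_mem_map(coded, pnum) != mem_map);
	/* ... and looking up a pfn's page reduces to one addition */
	BUG_ON((struct page *)coded + first_pfn != mem_map);
}
#endif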

static int sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

	return 1;
}

static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
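
/*
 * Illustrative sketch, not part of the original file: the pair above
 * must agree on which allocator produced the mapping, which is why
 * the free side probes the address range instead of carrying a flag.
 */
#if 0
static void example_memmap_lifetime(unsigned long nr_pages)
{
	struct page *memmap = __kmalloc_section_memmap(nr_pages);

	if (!memmap)
		return;
	/* ... populate the new section's mem_map ... */
	__kfree_section_memmap(memmap, nr_pages);	/* vfree or free_pages */
}
#endif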

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;
		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the newly allocated mem_map was not consumed and
 * is freed before returning.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed for this: sparse_index_init() does its own
	 * locking, and it may allocate (kmalloc), which must not happen
	 * under the resize lock taken below.
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = __kmalloc_section_memmap(nr_pages);

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}
	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}
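
/*
 * Illustrative sketch, not part of the original file: a hotplug-style
 * caller honouring the return convention above.  Real callers live in
 * the memory hotplug code; the zone and pfn here are hypothetical.
 */
#if 0
static int example_hotadd_section(struct zone *zone, unsigned long start_pfn)
{
	int ret = sparse_add_one_section(zone, start_pfn, PAGES_PER_SECTION);

	/* <= 0: the section was not added (its mem_map already cleaned up) */
	return ret > 0 ? 0 : ret;
}
#endif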