mm/memory_hotplug.c (linux-2.6/openmoko-kernel/knife-kernel.git, blob 01c9fb97c619135edce3431d265e397f49113b58)

/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>

#include <asm/tlbflush.h>

extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
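/*
 * Initialise the zone for one newly added section: if the zone was empty,
 * set it up via init_currently_empty_zone(), then initialise the section's
 * struct pages and record the zone in the pfn-to-zone table.
 */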
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}

extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);
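/*
 * Add one memory section: allocate and hook up its mem_map in the sparse
 * memory model, initialise the zone to cover it, and register the section
 * with the memory sysfs layer so it can be onlined later.
 */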
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		/*
		 * We want to keep adding the rest of the
		 * sections if the first ones already exist.
		 */
		if (err && (err != -EEXIST))
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

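/*
 * Widen the zone's pfn span so that [start_pfn, end_pfn) lies inside it.
 * The span seqlock is taken here (the caller already holds the pgdat
 * resize lock), so readers see a consistent zone_start_pfn/spanned_pages
 * pair.  grow_pgdat_span() below does the same for the node span.
 */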
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

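/*
 * Bring a pfn range online: grow the zone and node spans to cover it, then
 * walk the "System RAM" resources inside the range and hand every still
 * reserved page to the page allocator via online_page().  Present-page
 * counters and the per-zone watermarks are updated afterwards, and the
 * zonelists are rebuilt if the zone used to be empty.
 */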
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated it is not in the zonelists, so the
	 * page allocator ignores it.  The zonelists must therefore be
	 * rebuilt after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM;	/* we just need system ram */
	section_end = res.end;

	while (find_next_system_ram(&res) >= 0) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
			   ((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* pages in this region are not onlined yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}

		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	return 0;
}

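/*
 * Allocate and minimally initialise the node data for a node that is being
 * hot-added: all zones start out empty, and NODE_DATA(nid) becomes usable
 * once arch_refresh_nodedata() has installed the new pgdat.
 */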
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/* add this memory to iomem resource */
static void register_memory_resource(u64 start, u64 size)
{
	struct resource *res;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
	}
}

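/*
 * Hot-add a physical memory range to node nid.  If the node is not online
 * yet, a fresh pgdat is allocated and kswapd is started for it; the
 * architecture then creates the mappings and sections (arch_add_memory),
 * the node is marked online and registered in sysfs, and the range is
 * published as "System RAM" in the iomem resource tree.  Failures before
 * the node goes online roll the pgdat allocation back.
 */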
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	int ret;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);
	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs files for the new node can't be created,
		 * CPUs on that node can't be hot-added.  There is no way
		 * to roll back now, so catch the failure with BUG_ON(),
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	/* register this memory as resource */
	register_memory_resource(start, size);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);