/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>

#include <asm/tlbflush.h>
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}
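/*
 * Example (editor's note, not part of this file): request_resource()
 * fails on overlap, so registering an already-claimed range returns
 * NULL.  The addresses below are made up:
 *
 *	struct resource *r1 = register_memory_resource(0x40000000ULL, 0x8000000ULL);
 *	struct resource *r2 = register_memory_resource(0x40000000ULL, 0x8000000ULL);
 *	// r1 is valid, r2 is NULL: the range is already claimed
 */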
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = init_currently_empty_zone(zone, phys_start_pfn,
						    nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	return 0;
}
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* align the hot-added range to sections when initializing the mem_map */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
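/*
 * Example (editor's sketch, not part of this file): a minimal
 * arch_add_memory() in the style of the flat x86_64/ia64 versions of
 * this era.  The zone choice (ZONE_NORMAL) and the function name are
 * assumptions; each arch picks its own zone before handing the page
 * range to __add_pages().
 */
int example_arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}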
static void grow_zone_span(struct zone *zone,
			   unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
			    unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
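/*
 * Worked example (editor's note): if a zone currently spans pfns
 * [0x10000, 0x18000) and pfns [0x20000, 0x21000) are onlined, then
 * zone_start_pfn stays 0x10000 and spanned_pages becomes
 * max(0x18000, 0x21000) - 0x10000 = 0x11000.  The span is just the
 * covering interval, so it may include holes; present_pages, updated
 * in online_pages() below, tracks how much memory actually exists.
 */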
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, it is not in any zonelist,
	 * which means the page allocator ignores it.  So the zonelists
	 * must be rebuilt after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM; /* we just need system ram */
	section_end = res.end;

	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
			   ((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* the pages in this region are not onlined yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}
		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
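/*
 * Example (editor's sketch, not part of this file): onlining an added,
 * section-aligned range one section at a time, which is roughly what
 * the memory-block sysfs layer
 * ("echo online > /sys/devices/system/memory/memoryX/state") ends up
 * doing.  The helper name is hypothetical, and it would live under
 * CONFIG_MEMORY_HOTPLUG_SPARSE next to online_pages().
 */
static int example_online_range(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long pfn;
	int ret;

	/* assumes nr_pages is a multiple of PAGES_PER_SECTION */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
	     pfn += PAGES_PER_SECTION) {
		ret = online_pages(pfn, PAGES_PER_SECTION);
		if (ret)
			return ret;
	}
	return 0;
}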
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; there are no present pages */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat) {
			ret = -ENOMEM;
			goto error;
		}
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call the arch's memory hot-add */
	ret = arch_add_memory(nid, start, size);
	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from this point */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node can't be created,
		 * CPUs on that node can't be hot-added.  There is no way
		 * to roll back now, so catch it with BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* roll back the pgdat allocation and the rest */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
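/*
 * Example (editor's sketch, not part of this file): a hot-add driver
 * consuming add_memory().  The ACPI memory-hotplug driver of this era
 * does essentially this with ranges parsed from firmware; the function
 * name and the node lookup below are hypothetical.
 */
static int example_hotadd_device(u64 start, u64 size)
{
	int nid = 0;	/* assumption: firmware/arch supplies the node id */
	int ret;

	ret = add_memory(nid, start, size);
	if (ret)
		printk(KERN_ERR "hot-add of [%llx, %llx) failed: %d\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size), ret);
	return ret;
}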