/* [zen-stable.git] include/linux/memory_hotplug.h */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;

#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};
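/*
 * Illustrative sketch (not part of the original header): registration code
 * stores one of the types above in page->lru.next, so a consumer can
 * sanity-check the value against this range before touching the page,
 * roughly:
 *
 *	unsigned long type = (unsigned long)page->lru.next;
 *
 *	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
 *	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 *
 * This mirrors what put_page_bootmem() is expected to do in
 * mm/memory_hotplug.c.
 */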
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
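/*
 * Usage sketch (illustrative only): code that changes a node's span is
 * expected to hold the resize lock around updates to the pgdat fields; the
 * values new_start_pfn and new_nr_pages below are hypothetical:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	pgdat->node_start_pfn = new_start_pfn;
 *	pgdat->node_spanned_pages = new_nr_pages;
 *	pgdat_resize_unlock(pgdat, &flags);
 */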
/*
 * Zone resizing functions
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
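/*
 * Reader-side sketch (illustrative only; pfn and zone are assumed to be in
 * scope): a lockless test against a zone's span retries if it raced with a
 * resize:
 *
 *	unsigned seq;
 *	int outside;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		outside = pfn < zone->zone_start_pfn ||
 *			  pfn >= zone->zone_start_pfn + zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 * Writers bracket span updates with zone_span_writelock()/writeunlock().
 */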
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
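/*
 * Callback sketch (illustrative only; the driver function my_online_page()
 * is hypothetical): a balloon-style driver can divert newly onlined pages to
 * itself instead of handing them to the page allocator:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		(retain the page for the driver rather than calling
 *		 __online_page_free())
 *	}
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */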
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
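/*
 * Relationship sketch (illustrative only; zone choice and variable names are
 * assumptions): an architecture's arch_add_memory() typically resolves the
 * target zone and hands the pfn range to __add_pages():
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *	unsigned long start_pfn = start >> PAGE_SHIFT;
 *	unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *	return __add_pages(nid, zone, start_pfn, nr_pages);
 */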
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an arch uses the generic style of NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but this depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used;
 * arch_free_nodedata() is defined only for the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
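/*
 * Hot-add sketch (illustrative only, mirroring the comment above): node
 * hot-add allocates a pgdat, publishes it, and frees it again only if a
 * later step fails:
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *	if (!pgdat)
 *		return NULL;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	arch_free_nodedata(pgdat);	(error path only)
 */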
#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the
 * pgdat.
 * XXX: kmalloc_node() cannot be used to get the new node's memory at this
 *	point, because the pgdat for the new node has not been
 *	allocated/initialized yet. Using the new node's own memory will need
 *	more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif

/*
 * The memory hotplug lock guarantees that 1) all memory hotplug notifier
 * callbacks are called under it, and 2) offline/online/add/remove operations
 * do not run simultaneously.
 */

void lock_memory_hotplug(void);
void unlock_memory_hotplug(void);
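/*
 * Usage sketch (illustrative only; do_online_or_offline() is a hypothetical
 * helper): online/offline paths bracket their work with this lock so that
 * notifier callbacks and concurrent add/remove operations are serialized:
 *
 *	lock_memory_hotplug();
 *	ret = do_online_or_offline(...);
 *	unlock_memory_hotplug();
 */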
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline void lock_memory_hotplug(void) {}
static inline void unlock_memory_hotplug(void) {}

#endif /* ! CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);

#else
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern int mem_online_node(int nid);
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
					int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					unsigned long pnum);

#endif /* __LINUX_MEMORY_HOTPLUG_H */