/* mm/page_cgroup.c (Linux 2.6.35-rc2) */

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
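
/*
 * Initialize a single page_cgroup descriptor: clear its flags and
 * owning mem_cgroup, bind it to the struct page for @pfn, and reset
 * its LRU link.
 */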
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
        pc->flags = 0;
        pc->mem_cgroup = NULL;
        pc->page = pfn_to_page(pfn);
        INIT_LIST_HEAD(&pc->lru);
}
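
/* Total bytes allocated for page_cgroup tables; reported at boot time. */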
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        pgdat->node_page_cgroup = NULL;
}
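
/*
 * Flat-memory lookup: the per-node table is indexed by the pfn's offset
 * from the node's first pfn.
 */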
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long offset;
        struct page_cgroup *base;

        base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
        if (unlikely(!base))
                return NULL;

        offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
        return base + offset;
}
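
/*
 * Allocate and initialize one page_cgroup for every page spanned by
 * node @nid. The table comes from bootmem, since this runs before the
 * slab allocator is available.
 */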
static int __init alloc_node_page_cgroup(int nid)
{
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        unsigned long start_pfn, nr_pages, index;

        start_pfn = NODE_DATA(nid)->node_start_pfn;
        nr_pages = NODE_DATA(nid)->node_spanned_pages;

        if (!nr_pages)
                return 0;

        table_size = sizeof(struct page_cgroup) * nr_pages;

        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (!base)
                return -ENOMEM;
        for (index = 0; index < nr_pages; index++) {
                pc = base + index;
                __init_page_cgroup(pc, start_pfn + index);
        }
        NODE_DATA(nid)->node_page_cgroup = base;
        total_usage += table_size;
        return 0;
}

void __init page_cgroup_init_flatmem(void)
{
        int nid, fail;

        if (mem_cgroup_disabled())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_cgroup(nid);
                if (fail)
                        goto fail;
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
                " don't want memory cgroups\n");
        return;
fail:
        printk(KERN_CRIT "allocation of page_cgroup failed.\n");
        printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
        panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
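
/*
 * Sparsemem lookup: each mem_section stores a base pointer that is
 * already biased by the section's starting pfn (see
 * init_section_page_cgroup()), so indexing with the raw pfn lands on
 * the right entry.
 */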
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);

        if (!section->page_cgroup)
                return NULL;
        return section->page_cgroup + pfn;
}

/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
        struct mem_section *section = __pfn_to_section(pfn);
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        int nid, index;

        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
                VM_BUG_ON(!slab_is_available());
                if (node_state(nid, N_HIGH_MEMORY)) {
                        base = kmalloc_node(table_size,
                                        GFP_KERNEL | __GFP_NOWARN, nid);
                        if (!base)
                                base = vmalloc_node(table_size, nid);
                } else {
                        base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
                        if (!base)
                                base = vmalloc(table_size);
                }
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but the
                 * address of the memmap may have changed, so the entries
                 * must be initialized again.
                 */
                base = section->page_cgroup + pfn;
                table_size = 0;
                /* check whether the address of the memmap has changed */
                if (base->page == pfn_to_page(pfn))
                        return 0;
        }

        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        for (index = 0; index < PAGES_PER_SECTION; index++) {
                pc = base + index;
                __init_page_cgroup(pc, pfn + index);
        }

        /*
         * Store the base biased by the section's first pfn, so that
         * lookup_page_cgroup() can index it with a raw pfn.
         */
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
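
/*
 * Free the page_cgroup table covering @pfn's section. Tables that came
 * from bootmem (their pages are marked PageReserved) are left alone.
 */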
void __free_page_cgroup(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_cgroup *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_cgroup)
                return;
        base = ms->page_cgroup + pfn;
        if (is_vmalloc_addr(base)) {
                vfree(base);
                ms->page_cgroup = NULL;
        } else {
                struct page *page = virt_to_page(base);
                if (!PageReserved(page)) { /* Is bootmem ? */
                        kfree(base);
                        ms->page_cgroup = NULL;
                }
        }
}
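
/*
 * Populate page_cgroup tables for a pfn range that is about to come
 * online; on failure, roll back every section touched so far.
 */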
int __meminit online_page_cgroup(unsigned long start_pfn,
                        unsigned long nr_pages,
                        int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);

        return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
                unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);
        return 0;
}
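
/*
 * Memory-hotplug notifier: allocate tables before a region goes online
 * and free them once it has gone offline; the remaining events need no
 * action here.
 */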
static int __meminit page_cgroup_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_cgroup(mn->start_pfn,
                                   mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
                offline_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        if (ret)
                ret = notifier_from_errno(ret);
        else
                ret = NOTIFY_OK;

        return ret;
}

#endif

void __init page_cgroup_init(void)
{
        unsigned long pfn;
        int fail = 0;

        if (mem_cgroup_disabled())
                return;

        for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (fail) {
                printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
                panic("Out of memory");
        } else {
                hotplug_memory_notifier(page_cgroup_callback, 0);
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
                " want memory cgroups\n");
}
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
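
/*
 * Swap accounting state: for each swap device, an array of pages that
 * hold one unsigned short css ID per swap slot.
 */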
static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
        spinlock_t lock;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK     (SC_PER_PAGE - 1)

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical use, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache; at swap_free(), it is accessed directly
 * from the swap code.
 *
 * This means:
 *  - there is no race on "exchange" when we are reached via SwapCache,
 *    because the SwapCache (and its swp_entry) is under lock;
 *  - when called via swap_free(), there is no remaining user of the
 *    entry, so again no race.
 * Hence, no lock is needed around "exchange" itself.
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * Allocate the buffers for swap_cgroup of device @type: one zeroed page
 * per SC_PER_PAGE swap slots. On failure, free whatever was already
 * allocated.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);

        return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                        unsigned short old, unsigned short new)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned long flags;
        unsigned short retval;

        ctrl = &swap_cgroup_ctrl[type];

        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        spin_lock_irqsave(&ctrl->lock, flags);
        retval = sc->id;
        if (retval == old)
                sc->id = new;
        else
                retval = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: css ID of the mem_cgroup to be recorded
 *
 * Returns the old value at success, 0 at failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned short old;
        unsigned long flags;

        ctrl = &swap_cgroup_ctrl[type];

        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        spin_lock_irqsave(&ctrl->lock, flags);
        old = sc->id;
        sc->id = id;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup at success, 0 at failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned short ret;

        ctrl = &swap_cgroup_ctrl[type];
        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        ret = sc->id;
        return ret;
}
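
/*
 * Called at swapon: size and allocate the page-pointer map for device
 * @type (one page per SC_PER_PAGE swap slots) and populate it via
 * swap_cgroup_prepare().
 */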
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = ((max_pages/SC_PER_PAGE) + 1);
        array_size = length * sizeof(void *);

        array = vmalloc(array_size);
        if (!array)
                goto nomem;

        memset(array, 0, array_size);
        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        spin_lock_init(&ctrl->lock);
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                vfree(array);
                mutex_unlock(&swap_cgroup_mutex);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        return 0;
nomem:
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
        printk(KERN_INFO
                "swap_cgroup can be disabled by noswapaccount boot option\n");
        return -ENOMEM;
}
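
/*
 * Called at swapoff: free every per-device page and the map itself,
 * under swap_cgroup_mutex so this cannot race with swapon.
 */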
void swap_cgroup_swapoff(int type)
{
        int i;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        if (ctrl->map) {
                for (i = 0; i < ctrl->length; i++) {
                        struct page *page = ctrl->map[i];
                        if (page)
                                __free_page(page);
                }
                vfree(ctrl->map);
                ctrl->map = NULL;
                ctrl->length = 0;
        }
        mutex_unlock(&swap_cgroup_mutex);
}

#endif