/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field could be used, but having a rule is better: a charge function's
 * gfp_mask should be set to GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK) to
 * avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
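
/*
 * Illustrative sketch (not part of this header): a hypothetical fault path
 * charging a freshly allocated anonymous page, following the gfp_mask rule
 * above. On failure the page must be released again; "out_put_page" is a
 * made-up label in the assumed caller.
 *
 *	page = alloc_page(GFP_HIGHUSER_MOVABLE);
 *	if (!page)
 *		goto oom;
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		goto out_put_page;
 */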

/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
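
/*
 * Illustrative sketch (not part of this header): the three-step swapin
 * charge protocol. The caller first tries to charge, commits once the page
 * is safely mapped, or cancels if anything fails in between. The error
 * handling and "map_the_page_somehow" are a hypothetical caller's.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;
 *	if (map_the_page_somehow() < 0) {
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *		goto out_fail;
 *	}
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 */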

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);
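
/*
 * Illustrative sketch (not part of this header): pairing the memcg LRU
 * hooks with the actual list manipulation. The caller is assumed to hold
 * zone->lru_lock, and "lru" is one of the enum lru_list values.
 *
 *	struct lruvec *lruvec;
 *
 *	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
 *	list_add(&page->lru, &lruvec->lists[lru]);
 *	...
 *	mem_cgroup_lru_del_list(page, lru);
 *	list_del(&page->lru);
 */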

/* For coalescing uncharge, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
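
/*
 * Illustrative sketch (not part of this header): batching uncharges when
 * freeing many pages at once, so the individual uncharges can be coalesced
 * instead of hitting the counters once per page. The list walked here is a
 * hypothetical caller's.
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */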

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}
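
/*
 * Illustrative sketch (not part of this header): testing whether a task's
 * mm belongs to a given memcg, e.g. while scanning tasks during targeted
 * reclaim. "victim" and "consider_this_task" are hypothetical.
 *
 *	if (victim->mm && mm_match_cgroup(victim->mm, memcg))
 *		consider_this_task(victim);
 */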

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
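
/*
 * Illustrative sketch (not part of this header): moving a page's charge
 * across page migration. The charge is prepared before the copy and
 * finished once migration has either succeeded or failed; migration_ok
 * tells end_migration which page keeps the charge.
 * "copy_and_remap_the_page" is a hypothetical stand-in for the real work.
 *
 *	struct mem_cgroup *memcg;
 *	int rc;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		goto out;
 *	rc = copy_and_remap_the_page();
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */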

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
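
/*
 * Illustrative sketch (not part of this header): walking a memcg's
 * hierarchy. Passing the previous position back in continues the walk
 * until a full round trip returns NULL; breaking out early must go
 * through mem_cgroup_iter_break() so the iterator's reference on the
 * current position is dropped. "done_with_this_walk" is hypothetical.
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		if (done_with_this_walk(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */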

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, int zid,
					   unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					  struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
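
/*
 * Illustrative sketch (not part of this header): memcg hooks typically
 * bail out early when the controller has been disabled on the kernel
 * command line (cgroup_disable=memory):
 *
 *	if (mem_cgroup_disabled())
 *		return;
 */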

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
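
/*
 * Illustrative sketch (not part of this header): the rmap code keeps the
 * per-memcg file-mapped counter in sync when a page table mapping of a
 * page cache page is added or removed:
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */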

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
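
/*
 * Illustrative sketch (not part of this header): accounting a vm event
 * against the memcg owning an mm, alongside the global counter, e.g. from
 * the page fault path:
 *
 *	count_vm_event(PGFAULT);
 *	mem_cgroup_count_vm_event(mm, PGFAULT);
 */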

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
				  struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *memcg,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *memcg,
						      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
						 struct page *newpage)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}

static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */