/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field could therefore be used, but having a rule is better: a charge
 * function's gfp_mask should be set to GFP_KERNEL or to
 * (gfp_mask & GFP_RECLAIM_MASK) to avoid ambiguity.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
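/*
 * Illustrative sketch of the rule above (hypothetical call site, not part of
 * this API): a caller holding an arbitrary gfp_mask would strip it down to
 * its reclaim bits before charging:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK))
 *		goto charge_failed;
 */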
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
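/*
 * Sketch of the intended try/commit/cancel sequence for swap-in (hypothetical
 * caller shape; the real user is the page fault path):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out;	// charge failed, do not map the page
 *	...map the page...
 *	if (mapped_ok)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 */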
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);
/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
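/*
 * Usage sketch for the batching pair above (assumed caller shape): bracketing
 * a run of uncharges lets memcg coalesce the counter updates instead of
 * paying for them page by page.
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being released:
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */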
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
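/*
 * Migration charge handling, sketched (assumed caller shape; the
 * authoritative user is the page migration code): the charge is prepared
 * against the new page up front and fixed up once the outcome is known.
 *
 *	struct mem_cgroup *mem;
 *	int rc;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL))
 *		return -ENOMEM;		// hypothetical error handling
 *	rc = ...copy contents and remap...
 *	mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 */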
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					   struct zone *zone,
					   enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif
static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);
static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
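/*
 * Example (sketch): rmap code would keep MEMCG_NR_FILE_MAPPED in sync as a
 * file page gains or loses its mapping, along the lines of:
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);	// on map
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);	// on unmap
 */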
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
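/*
 * Sketch (hypothetical call site): a fault path that wants a major fault
 * accounted to the owning memcg as well as the global counters might do:
 *
 *	count_vm_event(PGMAJFAULT);
 *	mem_cgroup_count_vm_event(mm, PGMAJFAULT);
 */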
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif
#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}
static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}
static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}
static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}
static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
			     enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif
#endif /* _LINUX_MEMCONTROL_H */