mm/hugetlb_cgroup.c
/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
struct hugetlb_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for hugepages from hugetlb.
         */
        struct page_counter hugepage[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)        (((x) << 16) | (val))
#define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)        ((val) & 0xffff)
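
/*
 * Worked example (illustrative, not from the original source): cft->private
 * packs the hstate index into the high 16 bits and the resource attribute
 * into the low 16 bits.  With RES_LIMIT == 1 (see the enum further down):
 *
 *        MEMFILE_PRIVATE(1, RES_LIMIT) == (1 << 16) | 1 == 0x10001
 *        MEMFILE_IDX(0x10001)  == 1        (hstate index)
 *        MEMFILE_ATTR(0x10001) == 1        (RES_LIMIT)
 */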
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
        return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
        return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
        return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
        int idx;

        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                if (page_counter_read(&h_cg->hugepage[idx]))
                        return true;
        }
        return false;
}
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
        struct hugetlb_cgroup *h_cgroup;
        int idx;

        h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);

        if (parent_h_cgroup) {
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        page_counter_init(&h_cgroup->hugepage[idx],
                                          &parent_h_cgroup->hugepage[idx]);
        } else {
                root_h_cgroup = h_cgroup;
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        page_counter_init(&h_cgroup->hugepage[idx], NULL);
        }
        return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cgroup;

        h_cgroup = hugetlb_cgroup_from_css(css);
        kfree(h_cgroup);
}
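
/*
 * Note on the hierarchy (illustrative): page_counter_init() above chains
 * each child counter to its parent, so page_counter_try_charge() on a
 * child propagates the charge up through every ancestor and fails if any
 * level would exceed its limit.  E.g. with a 2MB-hstate limit of 1GB on
 * the parent and no limit on the child, a charge in the child still fails
 * once the parent's subtree usage reaches 1GB.
 */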
/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        unsigned int nr_pages;
        struct page_counter *counter;
        struct hugetlb_cgroup *page_hcg;
        struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

        page_hcg = hugetlb_cgroup_from_page(page);
        /*
         * We can have pages on the active list without any cgroup,
         * i.e., a hugepage with fewer than 3 pages. We can safely
         * ignore those pages.
         */
        if (!page_hcg || page_hcg != h_cg)
                goto out;

        nr_pages = 1 << compound_order(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
                page_counter_charge(&parent->hugepage[idx], nr_pages);
        }
        counter = &h_cg->hugepage[idx];
        /* Take the pages off the local counter */
        page_counter_cancel(counter, nr_pages);

        set_hugetlb_cgroup(page, parent);
out:
        return;
}
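
/*
 * Worked example (illustrative, 4K base pages assumed): for one 2MB
 * hugepage, nr_pages is 1 << compound_order(page) == 512.  Because the
 * page counters are hierarchical, the parent's usage already includes
 * the child's 512 pages, so page_counter_cancel() only drops the child's
 * local counter; the page is then re-tagged to the parent and the net
 * usage moves one level up without being uncharged from any ancestor.
 */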
/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
        struct hstate *h;
        struct page *page;
        int idx = 0;

        do {
                for_each_hstate(h) {
                        spin_lock(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
                                hugetlb_cgroup_move_parent(idx, h_cg, page);

                        spin_unlock(&hugetlb_lock);
                        idx++;
                }
                cond_resched();
        } while (hugetlb_cgroup_have_usage(h_cg));
}
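
/*
 * Usage sketch (illustrative; the mount point below is the conventional
 * v1 location, not mandated by this file): this path runs when a hugetlb
 * cgroup directory is removed while hugepages are still charged to it:
 *
 *        # mkdir /sys/fs/cgroup/hugetlb/child
 *        # ... a task in "child" maps a hugepage, then exits ...
 *        # rmdir /sys/fs/cgroup/hugetlb/child
 *
 * The rmdir triggers ->css_offline, which walks every hstate's active
 * list and reparents the charges until hugetlb_cgroup_have_usage()
 * reports the cgroup empty.
 */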
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
{
        int ret = 0;
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = NULL;

        if (hugetlb_cgroup_disabled())
                goto done;
        /*
         * We don't charge any cgroup if the compound page has fewer
         * than 3 pages.
         */
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                goto done;
again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
        if (!css_tryget_online(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        ret = page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter);
        css_put(&h_cg->css);
done:
        *ptr = h_cg;
        return ret;
}
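
/*
 * Caller sketch (illustrative only, not compiled): the two-phase API is
 * meant to be driven the way mm/hugetlb.c allocates a hugepage -- charge
 * first, take hugetlb_lock, pick a page, then commit; on failure, undo
 * with hugetlb_cgroup_uncharge_cgroup().  The names and control flow
 * below are a simplified paraphrase of that caller, not part of this file.
 */
#if 0
        struct hugetlb_cgroup *h_cg;
        int idx = hstate_index(h);
        unsigned long nr_pages = pages_per_huge_page(h);

        ret = hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg);
        if (ret)
                return ERR_PTR(-ENOSPC);

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, ...);
        if (page)
                /* tie the page to h_cg while still holding the lock */
                hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page);
        spin_unlock(&hugetlb_lock);

        if (!page)
                /* allocation failed after a successful charge: unwind it */
                hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);
#endif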
/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        set_hugetlb_cgroup(page, h_cg);
        return;
}
/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                  struct page *page)
{
        struct hugetlb_cgroup *h_cg;

        if (hugetlb_cgroup_disabled())
                return;
        lockdep_assert_held(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(page);
        if (unlikely(!h_cg))
                return;
        set_hugetlb_cgroup(page, NULL);
        page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
        return;
}
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                    struct hugetlb_cgroup *h_cg)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                return;

        page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
        return;
}
enum {
        RES_USAGE,
        RES_LIMIT,
        RES_MAX_USAGE,
        RES_FAILCNT,
};
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

        switch (MEMFILE_ATTR(cft->private)) {
        case RES_USAGE:
                return (u64)page_counter_read(counter) * PAGE_SIZE;
        case RES_LIMIT:
                return (u64)counter->limit * PAGE_SIZE;
        case RES_MAX_USAGE:
                return (u64)counter->watermark * PAGE_SIZE;
        case RES_FAILCNT:
                return counter->failcnt;
        default:
                BUG();
        }
}
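
/*
 * Worked example (illustrative, 4K base pages assumed): with three 2MB
 * hugepages charged, the counter holds 3 * 512 == 1536 base pages, so
 * reading hugetlb.2MB.usage_in_bytes returns 1536 * 4096 == 6291456
 * bytes (6MB), matching the "_in_bytes" naming of the control files.
 */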
static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        int ret, idx;
        unsigned long nr_pages;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

        if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
                return -EINVAL;

        buf = strstrip(buf);
        ret = page_counter_memparse(buf, "-1", &nr_pages);
        if (ret)
                return ret;

        idx = MEMFILE_IDX(of_cft(of)->private);

        switch (MEMFILE_ATTR(of_cft(of)->private)) {
        case RES_LIMIT:
                mutex_lock(&hugetlb_limit_mutex);
                ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
                mutex_unlock(&hugetlb_limit_mutex);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}
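
/*
 * Usage sketch (illustrative; paths assume the conventional v1 mount):
 * page_counter_memparse() accepts K/M/G suffixes and the literal "-1"
 * for "no limit", and rounds the byte value down to whole pages:
 *
 *        # echo 1G > /sys/fs/cgroup/hugetlb/child/hugetlb.2MB.limit_in_bytes
 *        # echo -1 > /sys/fs/cgroup/hugetlb/child/hugetlb.2MB.limit_in_bytes
 *
 * Writes to the root cgroup's file fail with -EINVAL, as checked above.
 */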
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        int ret = 0;
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

        counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

        switch (MEMFILE_ATTR(of_cft(of)->private)) {
        case RES_MAX_USAGE:
                page_counter_reset_watermark(counter);
                break;
        case RES_FAILCNT:
                counter->failcnt = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
        if (hsize >= (1UL << 30))
                snprintf(buf, size, "%luGB", hsize >> 30);
        else if (hsize >= (1UL << 20))
                snprintf(buf, size, "%luMB", hsize >> 20);
        else
                snprintf(buf, size, "%luKB", hsize >> 10);
        return buf;
}
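
/*
 * Worked example (illustrative): mem_fmt() turns a huge page size into
 * the string embedded in the control file names:
 *
 *        mem_fmt(buf, 32, 2UL << 20)        -> "2MB"
 *        mem_fmt(buf, 32, 1UL << 30)        -> "1GB"
 *        mem_fmt(buf, 32, 64UL << 10)       -> "64KB"
 */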
static void __init __hugetlb_cgroup_file_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, 32, huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write = hugetlb_cgroup_write;

        /* Add the usage file */
        cft = &h->cgroup_files[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX usage file */
        cft = &h->cgroup_files[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the failcnt file */
        cft = &h->cgroup_files[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files[4];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
                                          h->cgroup_files));
}
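
/*
 * Result sketch (illustrative): for a 2MB hstate, the cgroup core
 * prefixes cft->name with the subsystem name, so the four files
 * registered above appear in every hugetlb cgroup directory as:
 *
 *        hugetlb.2MB.limit_in_bytes
 *        hugetlb.2MB.usage_in_bytes
 *        hugetlb.2MB.max_usage_in_bytes
 *        hugetlb.2MB.failcnt
 */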
void __init hugetlb_cgroup_file_init(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /*
                 * Add cgroup control files only if the huge page consists
                 * of more than two normal pages. This is because we use
                 * page[2].lru.next for storing cgroup details.
                 */
                if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
                        __hugetlb_cgroup_file_init(hstate_index(h));
        }
}
/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
        struct hugetlb_cgroup *h_cg;
        struct hstate *h = page_hstate(oldhpage);

        if (hugetlb_cgroup_disabled())
                return;

        VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
        spin_lock(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(oldhpage);
        set_hugetlb_cgroup(oldhpage, NULL);

        /* move the h_cg details to the new page */
        set_hugetlb_cgroup(newhpage, h_cg);
        list_move(&newhpage->lru, &h->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        return;
}
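
/*
 * Note (illustrative): migration moves only the cgroup tag, never the
 * charge -- old and new page cover the same nr_pages, so the page
 * counters are left untouched; the new page inherits h_cg and is moved
 * onto the hstate's active list in the old page's stead.
 */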
struct cgroup_subsys hugetlb_cgrp_subsys = {
        .css_alloc        = hugetlb_cgroup_css_alloc,
        .css_offline      = hugetlb_cgroup_css_offline,
        .css_free         = hugetlb_cgroup_css_free,
};