/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
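/*
 * Each control file's cftype->private packs two values into one int: the
 * hstate index in the upper 16 bits and a RES_* attribute in the lower 16.
 * For example, MEMFILE_PRIVATE(1, RES_LIMIT) yields (1 << 16) | RES_LIMIT,
 * from which MEMFILE_IDX() recovers 1 and MEMFILE_ATTR() recovers RES_LIMIT.
 */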
#define hugetlb_cgroup_from_counter(counter, idx)                   \
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}
static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}
static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}
static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}
static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(
				hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
			return true;
	}
	return false;
}
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}
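/*
 * Note: page_counter_init() links each new counter to the corresponding
 * counter in the parent cgroup, so charges propagate up the hierarchy.
 * The default limit is PAGE_COUNTER_MAX rounded down to a whole number of
 * huge pages; hugetlb_cgroup_read_u64_max() below compares against this
 * exact value to display "max" for an unlimited cgroup.
 */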
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}
static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}
/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take a
 * page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}
/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}
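/*
 * Mirrors the memory.events vs memory.events.local split on cgroup v2:
 * events_local counts only events that happened in this cgroup, while
 * events is hierarchical and is bumped in the cgroup itself and in every
 * ancestor below the root.
 */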
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/*
	 * Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}
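/*
 * A sketch of the expected call sequence (following the callers in
 * mm/hugetlb.c): the allocation path first charges with
 * hugetlb_cgroup_charge_cgroup(), then, once a page has actually been
 * obtained, binds the cgroup to the page with
 * hugetlb_cgroup_commit_charge() under hugetlb_lock; if no page could be
 * allocated, the charge is undone via hugetlb_cgroup_uncharge_cgroup().
 */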
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}
int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}
/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct page *page, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(page, h_cg, rsvd);
}
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}
void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}
/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					   struct page *page, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(page, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}
void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
				       struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}
static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}
void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}
void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}
void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
	}
}
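/*
 * Reading of the condition above (an interpretation, based on the resv_map
 * usage in mm/hugetlb.c): per-region counters (rg->reservation_counter) are
 * used for shared mappings, while a non-NULL resv->reservation_counter
 * marks a private mapping whose charge is dropped in one shot by
 * hugetlb_cgroup_uncharge_counter(); hence the !resv->reservation_counter
 * guard here.
 */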
enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}
static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}
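/*
 * Read-side view of the "max" sentinel, e.g. on cgroup v2 (illustrative
 * shell session):
 *   $ cat hugetlb.2MB.max
 *   max
 * Any other configured limit is reported in bytes.
 */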
static DEFINE_MUTEX(hugetlb_limit_mutex);
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}
static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
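/*
 * e.g. a 2MiB huge page size formats as "2MB" and a 1GiB one as "1GB";
 * these strings become part of the per-hstate control file names below.
 */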
static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}
static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}
static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[6];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}
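/*
 * For a "2MB" hstate (illustrative), the cgroup core prefixes the subsystem
 * name, so the cgroup v2 files created above are: hugetlb.2MB.max,
 * hugetlb.2MB.rsvd.max, hugetlb.2MB.current, hugetlb.2MB.rsvd.current,
 * hugetlb.2MB.events and hugetlb.2MB.events.local.
 */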
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[8];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}
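/*
 * The corresponding cgroup v1 files for a "2MB" hstate (illustrative):
 * hugetlb.2MB.limit_in_bytes, hugetlb.2MB.rsvd.limit_in_bytes,
 * hugetlb.2MB.usage_in_bytes, hugetlb.2MB.rsvd.usage_in_bytes,
 * hugetlb.2MB.max_usage_in_bytes, hugetlb.2MB.rsvd.max_usage_in_bytes,
 * hugetlb.2MB.failcnt and hugetlb.2MB.rsvd.failcnt.
 */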
static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}
void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}
/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);
	set_hugetlb_cgroup_rsvd(oldhpage, NULL);

	/* move the h_cg details to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};
struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};