// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE

#include <linux/limits.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/sysinfo.h>

#include "../kselftest.h"
#include "cgroup_util.h"

/*
 * Memory cgroup charging and vmstat data aggregation is performed using
 * percpu batches 32 pages big (look at MEMCG_CHARGE_BATCH). So the maximum
 * discrepancy between charge and vmstat entries is number of cpus multiplied
 * by 32 pages multiplied by 2.
 */
#define MAX_VMSTAT_ERROR (4096 * 32 * 2 * get_nprocs())
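
/*
 * Allocate negative dentries: stat() a non-existent file with a long name
 * once per iteration, so the kernel caches each failed lookup as reclaimable
 * slab memory. The number of iterations is passed via arg.
 */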
static int alloc_dcache(const char *cgroup, void *arg)
{
        unsigned long i;
        struct stat st;
        char buf[128];

        for (i = 0; i < (unsigned long)arg; i++) {
                snprintf(buf, sizeof(buf),
                         "/something-non-existent-with-a-long-name-%64lu-%d",
                         i, getpid());
                stat(buf, &st);
        }

        return 0;
}

/*
 * This test allocates 100000 negative dentries with long names.
 * Then it checks that "slab" in memory.stat is larger than 1M.
 * Then it sets memory.high to 1M and checks that at least 1/2
 * of the slab memory has been reclaimed.
 */
static int test_kmem_basic(const char *root)
{
        int ret = KSFT_FAIL;
        char *cg = NULL;
        long slab0, slab1, current;

        cg = cg_name(root, "kmem_basic_test");
        if (!cg)
                goto cleanup;

        if (cg_create(cg))
                goto cleanup;

        if (cg_run(cg, alloc_dcache, (void *)100000))
                goto cleanup;

        slab0 = cg_read_key_long(cg, "memory.stat", "slab ");
        if (slab0 < (1 << 20))
                goto cleanup;

        cg_write(cg, "memory.high", "1M");
        slab1 = cg_read_key_long(cg, "memory.stat", "slab ");
        if (slab1 <= 0)
                goto cleanup;

        current = cg_read_long(cg, "memory.current");
        if (current <= 0)
                goto cleanup;

        if (slab1 < slab0 / 2 && current < slab0 / 2)
                ret = KSFT_PASS;

cleanup:
        cg_destroy(cg);
        free(cg);

        return ret;
}
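
/* Thread worker: allocate a small batch of negative dentries. */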
static void *alloc_kmem_fn(void *arg)
{
        alloc_dcache(NULL, (void *)100);
        return NULL;
}
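
/*
 * Allocate kernel memory (negative dentries) from 2 * NR_CPUS threads,
 * so that allocations are spread over all CPUs.
 */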
static int alloc_kmem_smp(const char *cgroup, void *arg)
{
        int nr_threads = 2 * get_nprocs();
        pthread_t *tinfo;
        unsigned long i;
        int ret = -1;

        tinfo = calloc(nr_threads, sizeof(pthread_t));
        if (tinfo == NULL)
                return -1;

        for (i = 0; i < nr_threads; i++) {
                if (pthread_create(&tinfo[i], NULL, &alloc_kmem_fn,
                                   (void *)i)) {
                        free(tinfo);
                        return -1;
                }
        }

        for (i = 0; i < nr_threads; i++) {
                ret = pthread_join(tinfo[i], NULL);
                if (ret)
                        break;
        }

        free(tinfo);
        return ret;
}
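
/*
 * Run fn() in "times" freshly created child cgroups of "parent",
 * destroying every child cgroup afterwards.
 */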
static int cg_run_in_subcgroups(const char *parent,
                                int (*fn)(const char *cgroup, void *arg),
                                void *arg, int times)
{
        char *child;
        int i;

        for (i = 0; i < times; i++) {
                child = cg_name_indexed(parent, "child", i);
                if (!child)
                        return -1;

                if (cg_create(child)) {
                        cg_destroy(child);
                        free(child);
                        return -1;
                }

                if (cg_run(child, fn, NULL)) {
                        cg_destroy(child);
                        free(child);
                        return -1;
                }

                cg_destroy(child);
                free(child);
        }

        return 0;
}

/*
 * The test creates and destroys a large number of cgroups. In each cgroup it
 * allocates some slab memory (mostly negative dentries) using 2 * NR_CPUS
 * threads. Then it checks the sanity of numbers on the parent level:
 * the total size of the cgroups should be roughly equal to
 * anon + file + slab + kernel_stack.
 */
static int test_kmem_memcg_deletion(const char *root)
{
        long current, slab, anon, file, kernel_stack, sum;
        int ret = KSFT_FAIL;
        char *parent;

        parent = cg_name(root, "kmem_memcg_deletion_test");
        if (!parent)
                goto cleanup;

        if (cg_create(parent))
                goto cleanup;

        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_run_in_subcgroups(parent, alloc_kmem_smp, NULL, 100))
                goto cleanup;

        current = cg_read_long(parent, "memory.current");
        slab = cg_read_key_long(parent, "memory.stat", "slab ");
        anon = cg_read_key_long(parent, "memory.stat", "anon ");
        file = cg_read_key_long(parent, "memory.stat", "file ");
        kernel_stack = cg_read_key_long(parent, "memory.stat", "kernel_stack ");
        if (current < 0 || slab < 0 || anon < 0 || file < 0 ||
            kernel_stack < 0)
                goto cleanup;

        sum = slab + anon + file + kernel_stack;
        if (abs(sum - current) < MAX_VMSTAT_ERROR) {
                ret = KSFT_PASS;
        } else {
                printf("memory.current = %ld\n", current);
                printf("slab + anon + file + kernel_stack = %ld\n", sum);
                printf("slab = %ld\n", slab);
                printf("anon = %ld\n", anon);
                printf("file = %ld\n", file);
                printf("kernel_stack = %ld\n", kernel_stack);
        }

cleanup:
        cg_destroy(parent);
        free(parent);

        return ret;
}

/*
 * The test reads the entire /proc/kpagecgroup. If the operation completes
 * successfully (and the kernel doesn't panic), the test is treated as passed.
 */
static int test_kmem_proc_kpagecgroup(const char *root)
{
        unsigned long buf[128];
        int ret = KSFT_FAIL;
        ssize_t len;
        int fd;

        fd = open("/proc/kpagecgroup", O_RDONLY);
        if (fd < 0)
                return ret;

        do {
                len = read(fd, buf, sizeof(buf));
        } while (len > 0);

        if (len == 0)
                ret = KSFT_PASS;

        close(fd);
        return ret;
}
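
/* Thread worker: sleep, keeping the kernel stack allocated. */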
static void *pthread_wait_fn(void *arg)
{
        sleep(100);
        return NULL;
}
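
/*
 * Spawn 1000 sleeping threads and check that memory.stat shows at least
 * one page of kernel_stack per thread.
 */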
static int spawn_1000_threads(const char *cgroup, void *arg)
{
        int nr_threads = 1000;
        pthread_t *tinfo;
        unsigned long i;
        long stack;
        int ret = -1;

        tinfo = calloc(nr_threads, sizeof(pthread_t));
        if (tinfo == NULL)
                return -1;

        for (i = 0; i < nr_threads; i++) {
                if (pthread_create(&tinfo[i], NULL, &pthread_wait_fn,
                                   (void *)i)) {
                        free(tinfo);
                        return -1;
                }
        }

        stack = cg_read_key_long(cgroup, "memory.stat", "kernel_stack ");
        if (stack >= 4096 * 1000)
                ret = 0;

        free(tinfo);
        return ret;
}

/*
 * The test spawns a process, which spawns 1000 threads. Then it checks
 * that memory.stat's kernel_stack is at least 1000 pages large.
 */
static int test_kmem_kernel_stacks(const char *root)
{
        int ret = KSFT_FAIL;
        char *cg = NULL;

        cg = cg_name(root, "kmem_kernel_stacks_test");
        if (!cg)
                goto cleanup;
        if (cg_create(cg))
                goto cleanup;
        if (cg_run(cg, spawn_1000_threads, NULL))
                goto cleanup;

        ret = KSFT_PASS;
cleanup:
        cg_destroy(cg);
        free(cg);
        return ret;
}

/*
 * This test sequentially creates 30 child cgroups, allocates some
 * kernel memory in each of them, and deletes them. Then it checks
 * that the number of dying cgroups on the parent level is 0.
 */
static int test_kmem_dead_cgroups(const char *root)
{
        int ret = KSFT_FAIL;
        char *parent;
        long dead;
        int i;

        parent = cg_name(root, "kmem_dead_cgroups_test");
        if (!parent)
                goto cleanup;

        if (cg_create(parent))
                goto cleanup;

        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_run_in_subcgroups(parent, alloc_dcache, (void *)100, 30))
                goto cleanup;

        for (i = 0; i < 5; i++) {
                dead = cg_read_key_long(parent, "cgroup.stat",
                                        "nr_dying_descendants ");
                if (dead == 0) {
                        ret = KSFT_PASS;
                        break;
                }
                /*
                 * Reclaiming cgroups might take some time,
                 * let's wait a bit and repeat.
                 */
                sleep(1);
        }

cleanup:
        cg_destroy(parent);
        free(parent);

        return ret;
}

/*
 * This test creates a sub-tree with 1000 memory cgroups.
 * Then it checks that the memory.current on the parent level
 * is greater than 0 and approximately matches the percpu value
 * from memory.stat.
 */
static int test_percpu_basic(const char *root)
{
        int ret = KSFT_FAIL;
        char *parent, *child;
        long current, percpu;
        int i;

        parent = cg_name(root, "percpu_basic_test");
        if (!parent)
                goto cleanup;

        if (cg_create(parent))
                goto cleanup;

        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
                goto cleanup;

        for (i = 0; i < 1000; i++) {
                child = cg_name_indexed(parent, "child", i);
                if (!child)
                        goto cleanup_children;

                if (cg_create(child))
                        goto cleanup_children;

                free(child);
        }

        current = cg_read_long(parent, "memory.current");
        percpu = cg_read_key_long(parent, "memory.stat", "percpu ");

        if (current > 0 && percpu > 0 && abs(current - percpu) <
            MAX_VMSTAT_ERROR)
                ret = KSFT_PASS;
        else
                printf("memory.current %ld\npercpu %ld\n",
                       current, percpu);

cleanup_children:
        for (i = 0; i < 1000; i++) {
                child = cg_name_indexed(parent, "child", i);
                cg_destroy(child);
                free(child);
        }

cleanup:
        cg_destroy(parent);
        free(parent);

        return ret;
}
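
/*
 * Each test is registered via the T() macro, which stores both the
 * function pointer and its name for the kselftest result output.
 */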
#define T(x) { x, #x }
struct kmem_test {
        int (*fn)(const char *root);
        const char *name;
} tests[] = {
        T(test_kmem_basic),
        T(test_kmem_memcg_deletion),
        T(test_kmem_proc_kpagecgroup),
        T(test_kmem_kernel_stacks),
        T(test_kmem_dead_cgroups),
        T(test_percpu_basic),
};
#undef T

int main(int argc, char **argv)
{
        char root[PATH_MAX];
        int i, ret = EXIT_SUCCESS;

        if (cg_find_unified_root(root, sizeof(root)))
                ksft_exit_skip("cgroup v2 isn't mounted\n");

        /*
         * Check that memory controller is available:
         * memory is listed in cgroup.controllers
         */
        if (cg_read_strstr(root, "cgroup.controllers", "memory"))
                ksft_exit_skip("memory controller isn't available\n");

        if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
                if (cg_write(root, "cgroup.subtree_control", "+memory"))
                        ksft_exit_skip("Failed to set memory controller\n");

        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                switch (tests[i].fn(root)) {
                case KSFT_PASS:
                        ksft_test_result_pass("%s\n", tests[i].name);
                        break;
                case KSFT_SKIP:
                        ksft_test_result_skip("%s\n", tests[i].name);
                        break;
                default:
                        ret = EXIT_FAILURE;
                        ksft_test_result_fail("%s\n", tests[i].name);
                        break;
                }
        }

        return ret;
}