/*
 * From the "WIP FPC-III support" branch of linux/fpc-iii.git:
 * tools/testing/selftests/cgroup/test_core.c
 * (blob 3df648c378765653348791d7f4e53d20b6ee3eb5)
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
3 #include <linux/limits.h>
4 #include <sys/types.h>
5 #include <sys/mman.h>
6 #include <sys/wait.h>
7 #include <unistd.h>
8 #include <fcntl.h>
9 #include <stdio.h>
10 #include <errno.h>
11 #include <signal.h>
12 #include <string.h>
13 #include <pthread.h>
15 #include "../kselftest.h"
16 #include "cgroup_util.h"
/*
 * Fill @size bytes at @buf with data from /dev/urandom, forcing every page
 * to be faulted in and dirtied as anonymous memory.
 *
 * Returns 0 on success, -1 on open/read failure.  EINTR is retried.
 * Fix: a read() return of 0 (unexpected EOF) previously made no progress
 * and spun the loop forever; it is now treated as an error.
 */
static int touch_anon(char *buf, size_t size)
{
	int fd;
	char *pos = buf;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0)
		return -1;

	while (size > 0) {
		ssize_t ret = read(fd, pos, size);

		if (ret < 0) {
			if (errno != EINTR) {
				close(fd);
				return -1;
			}
		} else if (ret == 0) {
			/* EOF from urandom should not happen; bail out
			 * instead of looping without progress. */
			close(fd);
			return -1;
		} else {
			pos += ret;
			size -= ret;
		}
	}

	close(fd);
	return 0;
}
45 static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
47 int ppid = getppid();
48 size_t size = (size_t)arg;
49 void *buf;
51 buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
52 0, 0);
53 if (buf == MAP_FAILED)
54 return -1;
56 if (touch_anon((char *)buf, size)) {
57 munmap(buf, size);
58 return -1;
61 while (getppid() == ppid)
62 sleep(1);
64 munmap(buf, size);
65 return 0;
69 * Create a child process that allocates and touches 100MB, then waits to be
70 * killed. Wait until the child is attached to the cgroup, kill all processes
71 * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
72 * destroy the empty cgroup. The test helps detect race conditions between
73 * dying processes leaving the cgroup and cgroup destruction path.
75 static int test_cgcore_destroy(const char *root)
77 int ret = KSFT_FAIL;
78 char *cg_test = NULL;
79 int child_pid;
80 char buf[PAGE_SIZE];
82 cg_test = cg_name(root, "cg_test");
84 if (!cg_test)
85 goto cleanup;
87 for (int i = 0; i < 10; i++) {
88 if (cg_create(cg_test))
89 goto cleanup;
91 child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
92 (void *) MB(100));
94 if (child_pid < 0)
95 goto cleanup;
97 /* wait for the child to enter cgroup */
98 if (cg_wait_for_proc_count(cg_test, 1))
99 goto cleanup;
101 if (cg_killall(cg_test))
102 goto cleanup;
104 /* wait for cgroup to be empty */
105 while (1) {
106 if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
107 goto cleanup;
108 if (buf[0] == '\0')
109 break;
110 usleep(1000);
113 if (rmdir(cg_test))
114 goto cleanup;
116 if (waitpid(child_pid, NULL, 0) < 0)
117 goto cleanup;
119 ret = KSFT_PASS;
120 cleanup:
121 if (cg_test)
122 cg_destroy(cg_test);
123 free(cg_test);
124 return ret;
128 * A(0) - B(0) - C(1)
129 * \ D(0)
131 * A, B and C's "populated" fields would be 1 while D's 0.
132 * test that after the one process in C is moved to root,
133 * A,B and C's "populated" fields would flip to "0" and file
134 * modified events will be generated on the
135 * "cgroup.events" files of both cgroups.
137 static int test_cgcore_populated(const char *root)
139 int ret = KSFT_FAIL;
140 int err;
141 char *cg_test_a = NULL, *cg_test_b = NULL;
142 char *cg_test_c = NULL, *cg_test_d = NULL;
143 int cgroup_fd = -EBADF;
144 pid_t pid;
146 cg_test_a = cg_name(root, "cg_test_a");
147 cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
148 cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
149 cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");
151 if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
152 goto cleanup;
154 if (cg_create(cg_test_a))
155 goto cleanup;
157 if (cg_create(cg_test_b))
158 goto cleanup;
160 if (cg_create(cg_test_c))
161 goto cleanup;
163 if (cg_create(cg_test_d))
164 goto cleanup;
166 if (cg_enter_current(cg_test_c))
167 goto cleanup;
169 if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
170 goto cleanup;
172 if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
173 goto cleanup;
175 if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
176 goto cleanup;
178 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
179 goto cleanup;
181 if (cg_enter_current(root))
182 goto cleanup;
184 if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
185 goto cleanup;
187 if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
188 goto cleanup;
190 if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
191 goto cleanup;
193 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
194 goto cleanup;
196 /* Test that we can directly clone into a new cgroup. */
197 cgroup_fd = dirfd_open_opath(cg_test_d);
198 if (cgroup_fd < 0)
199 goto cleanup;
201 pid = clone_into_cgroup(cgroup_fd);
202 if (pid < 0) {
203 if (errno == ENOSYS)
204 goto cleanup_pass;
205 goto cleanup;
208 if (pid == 0) {
209 if (raise(SIGSTOP))
210 exit(EXIT_FAILURE);
211 exit(EXIT_SUCCESS);
214 err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");
216 (void)clone_reap(pid, WSTOPPED);
217 (void)kill(pid, SIGCONT);
218 (void)clone_reap(pid, WEXITED);
220 if (err)
221 goto cleanup;
223 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
224 goto cleanup;
226 /* Remove cgroup. */
227 if (cg_test_d) {
228 cg_destroy(cg_test_d);
229 free(cg_test_d);
230 cg_test_d = NULL;
233 pid = clone_into_cgroup(cgroup_fd);
234 if (pid < 0)
235 goto cleanup_pass;
236 if (pid == 0)
237 exit(EXIT_SUCCESS);
238 (void)clone_reap(pid, WEXITED);
239 goto cleanup;
241 cleanup_pass:
242 ret = KSFT_PASS;
244 cleanup:
245 if (cg_test_d)
246 cg_destroy(cg_test_d);
247 if (cg_test_c)
248 cg_destroy(cg_test_c);
249 if (cg_test_b)
250 cg_destroy(cg_test_b);
251 if (cg_test_a)
252 cg_destroy(cg_test_a);
253 free(cg_test_d);
254 free(cg_test_c);
255 free(cg_test_b);
256 free(cg_test_a);
257 if (cgroup_fd >= 0)
258 close(cgroup_fd);
259 return ret;
263 * A (domain threaded) - B (threaded) - C (domain)
265 * test that C can't be used until it is turned into a
266 * threaded cgroup. "cgroup.type" file will report "domain (invalid)" in
267 * these cases. Operations which fail due to invalid topology use
268 * EOPNOTSUPP as the errno.
270 static int test_cgcore_invalid_domain(const char *root)
272 int ret = KSFT_FAIL;
273 char *grandparent = NULL, *parent = NULL, *child = NULL;
275 grandparent = cg_name(root, "cg_test_grandparent");
276 parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
277 child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
278 if (!parent || !child || !grandparent)
279 goto cleanup;
281 if (cg_create(grandparent))
282 goto cleanup;
284 if (cg_create(parent))
285 goto cleanup;
287 if (cg_create(child))
288 goto cleanup;
290 if (cg_write(parent, "cgroup.type", "threaded"))
291 goto cleanup;
293 if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
294 goto cleanup;
296 if (!cg_enter_current(child))
297 goto cleanup;
299 if (errno != EOPNOTSUPP)
300 goto cleanup;
302 if (!clone_into_cgroup_run_wait(child))
303 goto cleanup;
305 if (errno == ENOSYS)
306 goto cleanup_pass;
308 if (errno != EOPNOTSUPP)
309 goto cleanup;
311 cleanup_pass:
312 ret = KSFT_PASS;
314 cleanup:
315 cg_enter_current(root);
316 if (child)
317 cg_destroy(child);
318 if (parent)
319 cg_destroy(parent);
320 if (grandparent)
321 cg_destroy(grandparent);
322 free(child);
323 free(parent);
324 free(grandparent);
325 return ret;
329 * Test that when a child becomes threaded
330 * the parent type becomes domain threaded.
332 static int test_cgcore_parent_becomes_threaded(const char *root)
334 int ret = KSFT_FAIL;
335 char *parent = NULL, *child = NULL;
337 parent = cg_name(root, "cg_test_parent");
338 child = cg_name(root, "cg_test_parent/cg_test_child");
339 if (!parent || !child)
340 goto cleanup;
342 if (cg_create(parent))
343 goto cleanup;
345 if (cg_create(child))
346 goto cleanup;
348 if (cg_write(child, "cgroup.type", "threaded"))
349 goto cleanup;
351 if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
352 goto cleanup;
354 ret = KSFT_PASS;
356 cleanup:
357 if (child)
358 cg_destroy(child);
359 if (parent)
360 cg_destroy(parent);
361 free(child);
362 free(parent);
363 return ret;
368 * Test that there's no internal process constrain on threaded cgroups.
369 * You can add threads/processes on a parent with a controller enabled.
371 static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
373 int ret = KSFT_FAIL;
374 char *parent = NULL, *child = NULL;
376 if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
377 cg_write(root, "cgroup.subtree_control", "+cpu")) {
378 ret = KSFT_SKIP;
379 goto cleanup;
382 parent = cg_name(root, "cg_test_parent");
383 child = cg_name(root, "cg_test_parent/cg_test_child");
384 if (!parent || !child)
385 goto cleanup;
387 if (cg_create(parent))
388 goto cleanup;
390 if (cg_create(child))
391 goto cleanup;
393 if (cg_write(parent, "cgroup.type", "threaded"))
394 goto cleanup;
396 if (cg_write(child, "cgroup.type", "threaded"))
397 goto cleanup;
399 if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
400 goto cleanup;
402 if (cg_enter_current(parent))
403 goto cleanup;
405 ret = KSFT_PASS;
407 cleanup:
408 cg_enter_current(root);
409 cg_enter_current(root);
410 if (child)
411 cg_destroy(child);
412 if (parent)
413 cg_destroy(parent);
414 free(child);
415 free(parent);
416 return ret;
420 * Test that you can't enable a controller on a child if it's not enabled
421 * on the parent.
423 static int test_cgcore_top_down_constraint_enable(const char *root)
425 int ret = KSFT_FAIL;
426 char *parent = NULL, *child = NULL;
428 parent = cg_name(root, "cg_test_parent");
429 child = cg_name(root, "cg_test_parent/cg_test_child");
430 if (!parent || !child)
431 goto cleanup;
433 if (cg_create(parent))
434 goto cleanup;
436 if (cg_create(child))
437 goto cleanup;
439 if (!cg_write(child, "cgroup.subtree_control", "+memory"))
440 goto cleanup;
442 ret = KSFT_PASS;
444 cleanup:
445 if (child)
446 cg_destroy(child);
447 if (parent)
448 cg_destroy(parent);
449 free(child);
450 free(parent);
451 return ret;
455 * Test that you can't disable a controller on a parent
456 * if it's enabled in a child.
458 static int test_cgcore_top_down_constraint_disable(const char *root)
460 int ret = KSFT_FAIL;
461 char *parent = NULL, *child = NULL;
463 parent = cg_name(root, "cg_test_parent");
464 child = cg_name(root, "cg_test_parent/cg_test_child");
465 if (!parent || !child)
466 goto cleanup;
468 if (cg_create(parent))
469 goto cleanup;
471 if (cg_create(child))
472 goto cleanup;
474 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
475 goto cleanup;
477 if (cg_write(child, "cgroup.subtree_control", "+memory"))
478 goto cleanup;
480 if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
481 goto cleanup;
483 ret = KSFT_PASS;
485 cleanup:
486 if (child)
487 cg_destroy(child);
488 if (parent)
489 cg_destroy(parent);
490 free(child);
491 free(parent);
492 return ret;
496 * Test internal process constraint.
497 * You can't add a pid to a domain parent if a controller is enabled.
499 static int test_cgcore_internal_process_constraint(const char *root)
501 int ret = KSFT_FAIL;
502 char *parent = NULL, *child = NULL;
504 parent = cg_name(root, "cg_test_parent");
505 child = cg_name(root, "cg_test_parent/cg_test_child");
506 if (!parent || !child)
507 goto cleanup;
509 if (cg_create(parent))
510 goto cleanup;
512 if (cg_create(child))
513 goto cleanup;
515 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
516 goto cleanup;
518 if (!cg_enter_current(parent))
519 goto cleanup;
521 if (!clone_into_cgroup_run_wait(parent))
522 goto cleanup;
524 ret = KSFT_PASS;
526 cleanup:
527 if (child)
528 cg_destroy(child);
529 if (parent)
530 cg_destroy(parent);
531 free(child);
532 free(parent);
533 return ret;
/* Thread body that simply blocks in pause() until cancelled. */
static void *dummy_thread_fn(void *arg)
{
	return (void *)(size_t)pause();
}
542 * Test threadgroup migration.
543 * All threads of a process are migrated together.
545 static int test_cgcore_proc_migration(const char *root)
547 int ret = KSFT_FAIL;
548 int t, c_threads = 0, n_threads = 13;
549 char *src = NULL, *dst = NULL;
550 pthread_t threads[n_threads];
552 src = cg_name(root, "cg_src");
553 dst = cg_name(root, "cg_dst");
554 if (!src || !dst)
555 goto cleanup;
557 if (cg_create(src))
558 goto cleanup;
559 if (cg_create(dst))
560 goto cleanup;
562 if (cg_enter_current(src))
563 goto cleanup;
565 for (c_threads = 0; c_threads < n_threads; ++c_threads) {
566 if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
567 goto cleanup;
570 cg_enter_current(dst);
571 if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
572 goto cleanup;
574 ret = KSFT_PASS;
576 cleanup:
577 for (t = 0; t < c_threads; ++t) {
578 pthread_cancel(threads[t]);
581 for (t = 0; t < c_threads; ++t) {
582 pthread_join(threads[t], NULL);
585 cg_enter_current(root);
587 if (dst)
588 cg_destroy(dst);
589 if (src)
590 cg_destroy(src);
591 free(dst);
592 free(src);
593 return ret;
/*
 * Thread body for test_cgcore_thread_migration(): bounce the calling
 * thread between grps[1] and grps[2] and verify /proc/self cgroup
 * membership after each hop.  @arg is a char *grps[3] with grps[0] = root.
 * Returns NULL on success, (void *)-1 on a membership mismatch.
 */
static void *migrating_thread_fn(void *arg)
{
	int g, i, n_iterations = 1000;
	char **grps = arg;
	char lines[3][PATH_MAX];

	/* Precompute the expected "0::<path>" lines relative to root. */
	for (g = 1; g < 3; ++g)
		snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));

	for (i = 0; i < n_iterations; ++i) {
		cg_enter_current_thread(grps[(i % 2) + 1]);

		if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
			return (void *)-1;
	}
	return NULL;
}
615 * Test single thread migration.
616 * Threaded cgroups allow successful migration of a thread.
618 static int test_cgcore_thread_migration(const char *root)
620 int ret = KSFT_FAIL;
621 char *dom = NULL;
622 char line[PATH_MAX];
623 char *grps[3] = { (char *)root, NULL, NULL };
624 pthread_t thr;
625 void *retval;
627 dom = cg_name(root, "cg_dom");
628 grps[1] = cg_name(root, "cg_dom/cg_src");
629 grps[2] = cg_name(root, "cg_dom/cg_dst");
630 if (!grps[1] || !grps[2] || !dom)
631 goto cleanup;
633 if (cg_create(dom))
634 goto cleanup;
635 if (cg_create(grps[1]))
636 goto cleanup;
637 if (cg_create(grps[2]))
638 goto cleanup;
640 if (cg_write(grps[1], "cgroup.type", "threaded"))
641 goto cleanup;
642 if (cg_write(grps[2], "cgroup.type", "threaded"))
643 goto cleanup;
645 if (cg_enter_current(grps[1]))
646 goto cleanup;
648 if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
649 goto cleanup;
651 if (pthread_join(thr, &retval))
652 goto cleanup;
654 if (retval)
655 goto cleanup;
657 snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
658 if (proc_read_strstr(0, 1, "cgroup", line))
659 goto cleanup;
661 ret = KSFT_PASS;
663 cleanup:
664 cg_enter_current(root);
665 if (grps[2])
666 cg_destroy(grps[2]);
667 if (grps[1])
668 cg_destroy(grps[1]);
669 if (dom)
670 cg_destroy(dom);
671 free(grps[2]);
672 free(grps[1]);
673 free(dom);
674 return ret;
677 #define T(x) { x, #x }
678 struct corecg_test {
679 int (*fn)(const char *root);
680 const char *name;
681 } tests[] = {
682 T(test_cgcore_internal_process_constraint),
683 T(test_cgcore_top_down_constraint_enable),
684 T(test_cgcore_top_down_constraint_disable),
685 T(test_cgcore_no_internal_process_constraint_on_threads),
686 T(test_cgcore_parent_becomes_threaded),
687 T(test_cgcore_invalid_domain),
688 T(test_cgcore_populated),
689 T(test_cgcore_proc_migration),
690 T(test_cgcore_thread_migration),
691 T(test_cgcore_destroy),
693 #undef T
695 int main(int argc, char *argv[])
697 char root[PATH_MAX];
698 int i, ret = EXIT_SUCCESS;
700 if (cg_find_unified_root(root, sizeof(root)))
701 ksft_exit_skip("cgroup v2 isn't mounted\n");
703 if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
704 if (cg_write(root, "cgroup.subtree_control", "+memory"))
705 ksft_exit_skip("Failed to set memory controller\n");
707 for (i = 0; i < ARRAY_SIZE(tests); i++) {
708 switch (tests[i].fn(root)) {
709 case KSFT_PASS:
710 ksft_test_result_pass("%s\n", tests[i].name);
711 break;
712 case KSFT_SKIP:
713 ksft_test_result_skip("%s\n", tests[i].name);
714 break;
715 default:
716 ret = EXIT_FAILURE;
717 ksft_test_result_fail("%s\n", tests[i].name);
718 break;
722 return ret;