/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pthread.h>

#include "../kselftest.h"
#include "cgroup_util.h"
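
/*
 * Fill the buffer with bytes read from /dev/urandom so that every
 * anonymous page backing it is actually touched and instantiated.
 * Returns 0 on success, -1 on error.
 */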
static int touch_anon(char *buf, size_t size)
{
	int fd;
	char *pos = buf;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0)
		return -1;

	while (size > 0) {
		ssize_t ret = read(fd, pos, size);

		if (ret < 0) {
			if (errno != EINTR) {
				close(fd);
				return -1;
			}
		} else {
			pos += ret;
			size -= ret;
		}
	}
	close(fd);

	return 0;
}
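
/*
 * Map and touch `size` bytes of anonymous memory, then linger until the
 * original parent exits (detected via getppid() changing). Used as the
 * child body for the cgroup destruction test below.
 */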
static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
{
	int ppid = getppid();
	size_t size = (size_t)arg;
	void *buf;

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
		   0, 0);
	if (buf == MAP_FAILED)
		return -1;

	if (touch_anon((char *)buf, size)) {
		munmap(buf, size);
		return -1;
	}

	while (getppid() == ppid)
		sleep(1);

	munmap(buf, size);
	return 0;
}
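
/*
 * Create a child that allocates and touches 100MB of anonymous memory,
 * wait until it shows up in "cgroup.procs", kill every process in the
 * cgroup and wait for "cgroup.procs" to become empty. Then try to remove
 * the (now empty) cgroup. Repeating this helps catch races between dying
 * processes leaving the cgroup and the cgroup destruction path.
 */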
static int test_cgcore_destroy(const char *root)
{
	int ret = KSFT_FAIL;
	char *cg_test = NULL;
	int child_pid;
	char buf[PAGE_SIZE];

	cg_test = cg_name(root, "cg_test");

	if (!cg_test)
		goto cleanup;

	for (int i = 0; i < 10; i++) {
		if (cg_create(cg_test))
			goto cleanup;

		child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
					  (void *) MB(100));

		if (child_pid < 0)
			goto cleanup;

		/* wait for the child to enter the cgroup */
		if (cg_wait_for_proc_count(cg_test, 1))
			goto cleanup;

		if (cg_killall(cg_test))
			goto cleanup;

		/* wait for the cgroup to become empty */
		while (1) {
			if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
				goto cleanup;
			if (buf[0] == '\0')
				break;
			usleep(1000);
		}

		if (rmdir(cg_test))
			goto cleanup;

		if (waitpid(child_pid, NULL, 0) < 0)
			goto cleanup;
	}
	ret = KSFT_PASS;
cleanup:
	if (cg_test)
		cg_destroy(cg_test);
	free(cg_test);
	return ret;
}
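
/*
 * Build the hierarchy
 *
 *	A - B - C
 *	     \ D
 *
 * and move the current process into C: A, B and C must report
 * "populated 1" while D reports "populated 0". After moving back to the
 * root, all four must report "populated 0". Finally, clone a child
 * directly into D (CLONE_INTO_CGROUP), verify D flips to populated and
 * back once the child is reaped, and check that cloning into a removed
 * cgroup fails.
 */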
static int test_cgcore_populated(const char *root)
{
	int ret = KSFT_FAIL;
	int err;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_c = NULL, *cg_test_d = NULL;
	int cgroup_fd = -EBADF;
	pid_t pid;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
	cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
	cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");

	if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
		goto cleanup;

	if (cg_create(cg_test_a))
		goto cleanup;

	if (cg_create(cg_test_b))
		goto cleanup;

	if (cg_create(cg_test_c))
		goto cleanup;

	if (cg_create(cg_test_d))
		goto cleanup;

	if (cg_enter_current(cg_test_c))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_enter_current(root))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Test that we can directly clone into a new cgroup. */
	cgroup_fd = dirfd_open_opath(cg_test_d);
	if (cgroup_fd < 0)
		goto cleanup;

	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0) {
		if (errno == ENOSYS)
			goto cleanup_pass;
		goto cleanup;
	}

	if (pid == 0) {
		if (raise(SIGSTOP))
			exit(EXIT_FAILURE);
		exit(EXIT_SUCCESS);
	}

	err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");

	(void)clone_reap(pid, WSTOPPED);
	(void)kill(pid, SIGCONT);
	(void)clone_reap(pid, WEXITED);

	if (err)
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Remove the cgroup. */
	if (cg_test_d) {
		cg_destroy(cg_test_d);
		free(cg_test_d);
		cg_test_d = NULL;
	}

	/* Test that we can't clone into a removed cgroup. */
	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0)
		goto cleanup_pass;
	if (pid == 0)
		exit(EXIT_SUCCESS);
	(void)clone_reap(pid, WEXITED);
	goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	if (cg_test_d)
		cg_destroy(cg_test_d);
	if (cg_test_c)
		cg_destroy(cg_test_c);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_d);
	free(cg_test_c);
	free(cg_test_b);
	free(cg_test_a);
	if (cgroup_fd >= 0)
		close(cgroup_fd);
	return ret;
}
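
/*
 * grandparent - parent - child
 *
 * Once the parent is switched to the threaded type, the child becomes an
 * invalid domain: "cgroup.type" reports "domain invalid" and attaching
 * processes to it fails with EOPNOTSUPP, both via "cgroup.procs" and via
 * CLONE_INTO_CGROUP (unless clone3() with CLONE_INTO_CGROUP is
 * unavailable, in which case that part is skipped).
 */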
static int test_cgcore_invalid_domain(const char *root)
{
	int ret = KSFT_FAIL;
	char *grandparent = NULL, *parent = NULL, *child = NULL;

	grandparent = cg_name(root, "cg_test_grandparent");
	parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
	child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
	if (!parent || !child || !grandparent)
		goto cleanup;

	if (cg_create(grandparent))
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
		goto cleanup;

	if (!cg_enter_current(child))
		goto cleanup;

	if (errno != EOPNOTSUPP)
		goto cleanup;

	if (!clone_into_cgroup_run_wait(child))
		goto cleanup;

	if (errno == ENOSYS)
		goto cleanup_pass;

	if (errno != EOPNOTSUPP)
		goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	if (grandparent)
		cg_destroy(grandparent);
	free(child);
	free(parent);
	free(grandparent);
	return ret;
}
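
/*
 * Test that when a child is switched to the threaded type, the parent's
 * "cgroup.type" changes to "domain threaded".
 */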
static int test_cgcore_parent_becomes_threaded(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
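
/*
 * Test that the internal process constraint does not apply to threaded
 * cgroups: processes can be attached to a threaded parent even with a
 * controller enabled in its subtree. Skipped when the cpu controller is
 * unavailable.
 */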
static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
	    cg_write(root, "cgroup.subtree_control", "+cpu")) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_enter_current(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
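
/*
 * Test that a controller can't be enabled in a child's
 * "cgroup.subtree_control" if it isn't enabled in the parent's.
 */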
static int test_cgcore_top_down_constraint_enable(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (!cg_write(child, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
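
/*
 * Test that a controller can't be disabled in a parent's
 * "cgroup.subtree_control" while it is still enabled in a child's.
 */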
static int test_cgcore_top_down_constraint_disable(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (cg_write(child, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
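
/*
 * Test the internal process constraint: once a controller is enabled in
 * a parent's "cgroup.subtree_control", processes can no longer be
 * attached to the parent itself, whether via "cgroup.procs" or via
 * CLONE_INTO_CGROUP.
 */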
static int test_cgcore_internal_process_constraint(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (!cg_enter_current(parent))
		goto cleanup;

	if (!clone_into_cgroup_run_wait(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}
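
/* Thread body for the migration test below: block in pause() until cancelled. */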
static void *dummy_thread_fn(void *arg)
{
	return (void *)(size_t)pause();
}
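
/*
 * Test process migration: move a multi-threaded process from one cgroup
 * to another and check that all of its threads (n_threads workers plus
 * the main thread) show up in the destination's "cgroup.threads".
 */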
static int test_cgcore_proc_migration(const char *root)
{
	int ret = KSFT_FAIL;
	int t, c_threads = 0, n_threads = 13;
	char *src = NULL, *dst = NULL;
	pthread_t threads[n_threads];

	src = cg_name(root, "cg_src");
	dst = cg_name(root, "cg_dst");
	if (!src || !dst)
		goto cleanup;

	if (cg_create(src))
		goto cleanup;
	if (cg_create(dst))
		goto cleanup;

	if (cg_enter_current(src))
		goto cleanup;

	for (c_threads = 0; c_threads < n_threads; ++c_threads) {
		if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
			goto cleanup;
	}

	cg_enter_current(dst);
	if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	for (t = 0; t < c_threads; ++t)
		pthread_cancel(threads[t]);

	for (t = 0; t < c_threads; ++t)
		pthread_join(threads[t], NULL);

	cg_enter_current(root);

	if (dst)
		cg_destroy(dst);
	if (src)
		cg_destroy(src);
	free(dst);
	free(src);
	return ret;
}
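
/*
 * Bounce the calling thread between grps[1] and grps[2] and verify its
 * cgroup membership through /proc after every move. Returns a non-NULL
 * value on mismatch.
 */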
static void *migrating_thread_fn(void *arg)
{
	int g, i, n_iterations = 1000;
	char **grps = arg;
	char lines[3][PATH_MAX];

	for (g = 1; g < 3; ++g)
		snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));

	for (i = 0; i < n_iterations; ++i) {
		cg_enter_current_thread(grps[(i % 2) + 1]);

		if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
			return (void *)-1;
	}
	return NULL;
}
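
/*
 * Test thread migration inside a threaded subtree: a worker thread
 * repeatedly migrates itself between two threaded cgroups while the main
 * thread stays put. Afterwards the main thread must still be in the
 * source group, showing that only the single thread moved.
 */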
static int test_cgcore_thread_migration(const char *root)
{
	int ret = KSFT_FAIL;
	char *dom = NULL;
	char line[PATH_MAX];
	char *grps[3] = { (char *)root, NULL, NULL };
	pthread_t thr;
	void *retval;

	dom = cg_name(root, "cg_dom");
	grps[1] = cg_name(root, "cg_dom/cg_src");
	grps[2] = cg_name(root, "cg_dom/cg_dst");
	if (!grps[1] || !grps[2] || !dom)
		goto cleanup;

	if (cg_create(dom))
		goto cleanup;
	if (cg_create(grps[1]))
		goto cleanup;
	if (cg_create(grps[2]))
		goto cleanup;

	if (cg_write(grps[1], "cgroup.type", "threaded"))
		goto cleanup;
	if (cg_write(grps[2], "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_enter_current(grps[1]))
		goto cleanup;

	if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
		goto cleanup;

	if (pthread_join(thr, &retval))
		goto cleanup;

	if (retval)
		goto cleanup;

	snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
	if (proc_read_strstr(0, 1, "cgroup", line))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (grps[2])
		cg_destroy(grps[2]);
	if (grps[1])
		cg_destroy(grps[1]);
	if (dom)
		cg_destroy(dom);
	free(grps[2]);
	free(grps[1]);
	free(dom);
	return ret;
}
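
/*
 * cgroup migration permission checks should be performed based on the
 * credentials at the time of open instead of write: open "cgroup.procs"
 * with a lesser euid, restore the original euid, and verify that writing
 * through the already-open fd still fails with EACCES.
 */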
static int test_cgcore_lesser_euid_open(const char *root)
{
	const uid_t test_euid = 65534;
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	uid_t saved_uid;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	saved_uid = geteuid();
	if (seteuid(test_euid))
		goto cleanup;

	cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);

	if (seteuid(saved_uid))
		goto cleanup;

	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}
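
/*
 * Helper state for test_cgcore_lesser_ns_open(): the clone()'d child
 * opens the given "cgroup.procs" path and passes the resulting fd and
 * errno back through this shared structure (CLONE_VM/CLONE_FILES).
 */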
struct lesser_ns_open_thread_arg {
	const char *path;
	int fd;
	int err;
};

static int lesser_ns_open_thread_fn(void *arg)
{
	struct lesser_ns_open_thread_arg *targ = arg;

	targ->fd = open(targ->path, O_RDWR);
	targ->err = errno;
	return 0;
}
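
/*
 * cgroup migration permission checks should be performed based on the
 * cgroup namespace at the time of open instead of write: the fd is
 * opened by a child in a new cgroup namespace, and writing through it
 * from the original namespace must fail with ENOENT.
 */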
static int test_cgcore_lesser_ns_open(const char *root)
{
	static char stack[65536];
	const uid_t test_euid = 65534;
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	struct lesser_ns_open_thread_arg targ = { .fd = -1 };
	pid_t pid;
	int status;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_b))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	targ.path = cg_test_b_procs;
	pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
		    CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
		    &targ);
	if (pid < 0)
		goto cleanup;

	if (waitpid(pid, &status, 0) < 0)
		goto cleanup;

	if (!WIFEXITED(status))
		goto cleanup;

	cg_test_b_procs_fd = targ.fd;
	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}
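
/* Table of test cases: T() pairs each function with its printable name. */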
#define T(x) { x, #x }
struct corecg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cgcore_internal_process_constraint),
	T(test_cgcore_top_down_constraint_enable),
	T(test_cgcore_top_down_constraint_disable),
	T(test_cgcore_no_internal_process_constraint_on_threads),
	T(test_cgcore_parent_becomes_threaded),
	T(test_cgcore_invalid_domain),
	T(test_cgcore_populated),
	T(test_cgcore_proc_migration),
	T(test_cgcore_thread_migration),
	T(test_cgcore_destroy),
	T(test_cgcore_lesser_euid_open),
	T(test_cgcore_lesser_ns_open),
};
#undef T

int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root)))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	/*
	 * Enable the memory controller in the root's subtree if it isn't
	 * already enabled; skip the whole suite if that fails.
	 */
	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}