// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE
#include <linux/limits.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#include "../kselftest.h"
#include "cgroup_util.h"

enum hog_clock_type {
	/* Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock. */
	CPU_HOG_CLOCK_PROCESS,
	/* Count elapsed time using system wallclock time. */
	CPU_HOG_CLOCK_WALL,
};

struct cpu_hogger {
	char *cgroup;
	pid_t pid;
	long usage;
};

struct cpu_hog_func_param {
	int nprocs;
	struct timespec ts;
	enum hog_clock_type clock_type;
};

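/*
 * This test creates two nested cgroups with and without enabling the cpu
 * controller, and verifies that the child's cgroup.controllers only lists
 * "cpu" when the parent has it in cgroup.subtree_control.
 */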
static int test_cpucg_subtree_control(const char *root)
{
	char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
	int ret = KSFT_FAIL;

	// Create two nested cgroups with the cpu controller enabled.
	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	child = cg_name(parent, "cpucg_test_child");
	if (!child)
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
		goto cleanup;

	// Create two nested cgroups without enabling the cpu controller.
	parent2 = cg_name(root, "cpucg_test_1");
	if (!parent2)
		goto cleanup;

	if (cg_create(parent2))
		goto cleanup;

	child2 = cg_name(parent2, "cpucg_test_child");
	if (!child2)
		goto cleanup;

	if (cg_create(child2))
		goto cleanup;

	if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(child2);
	free(child2);
	cg_destroy(parent);
	free(parent);
	cg_destroy(parent2);
	free(parent2);

	return ret;
}

static void *hog_cpu_thread_func(void *arg)
{
	while (1)
		;

	return NULL;
}

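/*
 * Returns lhs - rhs as a timespec, clamped to zero if rhs is greater
 * than lhs.
 */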
static struct timespec
timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
{
	struct timespec zero = {
		.tv_sec = 0,
		.tv_nsec = 0,
	};
	struct timespec ret;

	if (lhs->tv_sec < rhs->tv_sec)
		return zero;

	ret.tv_sec = lhs->tv_sec - rhs->tv_sec;

	if (lhs->tv_nsec < rhs->tv_nsec) {
		if (ret.tv_sec == 0)
			return zero;

		ret.tv_sec--;
		ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
	} else
		ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;

	return ret;
}

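/*
 * Hogs CPU in the given cgroup: spawns param->nprocs busy-spinning threads,
 * then sleeps in a loop until param->ts has elapsed, as measured by either
 * this process's CPU time or the wallclock, depending on param->clock_type.
 */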
static int hog_cpus_timed(const char *cgroup, void *arg)
{
	const struct cpu_hog_func_param *param =
		(struct cpu_hog_func_param *)arg;
	struct timespec ts_run = param->ts;
	struct timespec ts_remaining = ts_run;
	struct timespec ts_start;
	int i, ret;

	ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
	if (ret != 0)
		return ret;

	for (i = 0; i < param->nprocs; i++) {
		pthread_t tid;

		ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
		if (ret != 0)
			return ret;
	}

	while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
		struct timespec ts_total;

		ret = nanosleep(&ts_remaining, NULL);
		if (ret && errno != EINTR)
			return ret;

		if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
			ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
			if (ret != 0)
				return ret;
		} else {
			struct timespec ts_current;

			ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
			if (ret != 0)
				return ret;

			ts_total = timespec_sub(&ts_current, &ts_start);
		}

		ts_remaining = timespec_sub(&ts_run, &ts_total);
	}

	return 0;
}

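/*
 * Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
 * cpu.stat shows the expected output: no usage before the hog runs, and
 * roughly usage_seconds of usage afterwards.
 */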
static int test_cpucg_stats(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec, system_usec;
	long usage_seconds = 2;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
	if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_PROCESS,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (!values_close(usage_usec, expected_usage_usec, 1))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

static int
run_cpucg_weight_test(
		const char *root,
		pid_t (*spawn_child)(const struct cpu_hogger *child),
		int (*validate)(const struct cpu_hogger *children, int num_children))
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL;
	struct cpu_hogger children[3] = {NULL};

	parent = cg_name(root, "cpucg_test_0");
	if (!parent)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
		if (!children[i].cgroup)
			goto cleanup;

		if (cg_create(children[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(children[i].cgroup, "cpu.weight",
					50 * (i + 1)))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		pid_t pid = spawn_child(&children[i]);

		if (pid <= 0)
			goto cleanup;
		children[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++) {
		int retcode;

		waitpid(children[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(children); i++)
		children[i].usage = cg_read_key_long(children[i].cgroup,
				"cpu.stat", "usage_usec");

	if (validate(children, ARRAY_SIZE(children)))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		cg_destroy(children[i].cgroup);
		free(children[i].cgroup);
	}
	cg_destroy(parent);
	free(parent);

	return ret;
}

static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
{
	long usage_seconds = 10;
	struct cpu_hog_func_param param = {
		.nprocs = ncpus,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)&param);
}

static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, get_nprocs());
}

static int
overprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		long delta;

		if (children[i + 1].usage <= children[i].usage)
			goto cleanup;

		delta = children[i + 1].usage - children[i].usage;
		if (!values_close(delta, children[0].usage, 35))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

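/*
 * First, this test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 50
 * A/C	cpu.weight = 100
 * A/D	cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns as
 * many threads as there are cores, and hogs each CPU as much as possible
 * for some time interval.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * was given proportional runtime as informed by their cpu.weight.
 */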
static int test_cpucg_weight_overprovisioned(const char *root)
{
	return run_cpucg_weight_test(root, weight_hog_all_cpus,
			overprovision_validate);
}

static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
{
	return weight_hog_ncpus(child, 1);
}

static int
underprovision_validate(const struct cpu_hogger *children, int num_children)
{
	int ret = KSFT_FAIL, i;

	for (i = 0; i < num_children - 1; i++) {
		if (!values_close(children[i + 1].usage, children[0].usage, 15))
			goto cleanup;
	}

	ret = KSFT_PASS;
cleanup:
	return ret;
}

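/*
 * First, this test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 50
 * A/C	cpu.weight = 100
 * A/D	cpu.weight = 150
 *
 * A separate process is then created for each child cgroup which spawns a
 * single thread that hogs a CPU. The testcase is only run on systems that
 * have at least one core per thread in the child processes.
 *
 * Once all of the children have exited, we verify that each child cgroup
 * has roughly the same usage from cpu.stat.
 */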
static int test_cpucg_weight_underprovisioned(const char *root)
{
	/*
	 * Only run the test if there are enough cores to avoid
	 * overprovisioning the system.
	 */
	if (get_nprocs() < 4)
		return KSFT_SKIP;

	return run_cpucg_weight_test(root, weight_hog_one_cpu,
			underprovision_validate);
}

static int
run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
	int ret = KSFT_FAIL, i;
	char *parent = NULL, *child = NULL;
	struct cpu_hogger leaf[3] = {NULL};
	long nested_leaf_usage, child_usage;
	int nprocs = get_nprocs();

	if (!overprovisioned) {
		if (nprocs < 4)
			/*
			 * Only run the test if there are enough cores to avoid
			 * overprovisioning the system.
			 */
			return KSFT_SKIP;
		nprocs /= 4;
	}

	parent = cg_name(root, "cpucg_test");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;
	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;
	if (cg_write(child, "cgroup.subtree_control", "+cpu"))
		goto cleanup;
	if (cg_write(child, "cpu.weight", "1000"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		const char *ancestor;
		long weight;

		if (i == 0) {
			ancestor = parent;
			weight = 1000;
		} else {
			ancestor = child;
			weight = 5000;
		}
		leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
		if (!leaf[i].cgroup)
			goto cleanup;

		if (cg_create(leaf[i].cgroup))
			goto cleanup;

		if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		pid_t pid;
		struct cpu_hog_func_param param = {
			.nprocs = nprocs,
			.ts = {
				.tv_sec = 10,
				.tv_nsec = 0,
			},
			.clock_type = CPU_HOG_CLOCK_WALL,
		};

		pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
				(void *)&param);
		if (pid <= 0)
			goto cleanup;
		leaf[i].pid = pid;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		int retcode;

		waitpid(leaf[i].pid, &retcode, 0);
		if (!WIFEXITED(retcode))
			goto cleanup;
		if (WEXITSTATUS(retcode))
			goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
				"cpu.stat", "usage_usec");
		if (leaf[i].usage <= 0)
			goto cleanup;
	}

	nested_leaf_usage = leaf[1].usage + leaf[2].usage;
	if (overprovisioned) {
		if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
			goto cleanup;
	} else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
		goto cleanup;

	// The child cgroup's usage should match the sum of its leaves' usage.
	child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
	if (child_usage <= 0)
		goto cleanup;
	if (!values_close(child_usage, nested_leaf_usage, 1))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	for (i = 0; i < ARRAY_SIZE(leaf); i++) {
		cg_destroy(leaf[i].cgroup);
		free(leaf[i].cgroup);
	}
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

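/*
 * This test creates the following hierarchy:
 * A
 * A/B	cpu.weight = 1000
 * A/C	cpu.weight = 1000
 * A/C/D	cpu.weight = 5000
 * A/C/E	cpu.weight = 5000
 *
 * A separate process is created for each leaf, each of which spawns as many
 * threads as there are cores so that the machine is overprovisioned.
 *
 * Once all of those processes have exited, we verify that the leaf under A
 * received roughly as much runtime as the two nested leaves combined (B and
 * C have equal weight), and that the usage reported for A/C matches the sum
 * of its leaves.
 */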
static int
test_cpucg_nested_weight_overprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, true);
}

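/*
 * This test creates the same hierarchy as the overprovisioned variant:
 * A
 * A/B	cpu.weight = 1000
 * A/C	cpu.weight = 1000
 * A/C/D	cpu.weight = 5000
 * A/C/E	cpu.weight = 5000
 *
 * A separate process is created for each leaf, each of which spawns only
 * nproc / 4 threads so that the CPUs are underprovisioned.
 *
 * Once all of those processes have exited, we verify that each leaf got
 * roughly the same runtime, since weight does not constrain cgroups that
 * ask for less than their share.
 */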
static int
test_cpucg_nested_weight_underprovisioned(const char *root)
{
	return run_cpucg_nested_weight_test(root, false);
}

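/*
 * This test creates a cgroup with some maximum value within a period, and
 * verifies that a process in the cgroup is not overscheduled.
 */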
static int test_cpucg_max(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *cpucg;

	cpucg = cg_name(root, "cpucg_test");
	if (!cpucg)
		goto cleanup;

	if (cg_create(cpucg))
		goto cleanup;

	if (cg_write(cpucg, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(cpucg);
	free(cpucg);

	return ret;
}

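/*
 * This test verifies that a process inside of a nested cgroup whose parent
 * group has a cpu.max value set is properly throttled.
 */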
static int test_cpucg_max_nested(const char *root)
{
	int ret = KSFT_FAIL;
	long usage_usec, user_usec;
	long usage_seconds = 1;
	long expected_usage_usec = usage_seconds * USEC_PER_SEC;
	char *parent, *child;

	parent = cg_name(root, "cpucg_parent");
	child = cg_name(parent, "cpucg_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cpu.max", "1000"))
		goto cleanup;

	struct cpu_hog_func_param param = {
		.nprocs = 1,
		.ts = {
			.tv_sec = usage_seconds,
			.tv_nsec = 0,
		},
		.clock_type = CPU_HOG_CLOCK_WALL,
	};
	if (cg_run(child, hog_cpus_timed, (void *)&param))
		goto cleanup;

	usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
	user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
	if (user_usec <= 0)
		goto cleanup;

	if (user_usec >= expected_usage_usec)
		goto cleanup;

	if (values_close(usage_usec, expected_usage_usec, 95))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(child);
	free(child);
	cg_destroy(parent);
	free(parent);

	return ret;
}

#define T(x) { x, #x }
struct cpucg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cpucg_subtree_control),
	T(test_cpucg_stats),
	T(test_cpucg_weight_overprovisioned),
	T(test_cpucg_weight_underprovisioned),
	T(test_cpucg_nested_weight_overprovisioned),
	T(test_cpucg_nested_weight_underprovisioned),
	T(test_cpucg_max),
	T(test_cpucg_max_nested),
};
#undef T

int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root)))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
		if (cg_write(root, "cgroup.subtree_control", "+cpu"))
			ksft_exit_skip("Failed to set cpu controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}