/*
 * NOTE(review): this is the tail of a helper whose opening (mmap of `buf`,
 * capture of `ppid`) lies before this chunk — restore/inspect the full
 * function upstream before relying on these comments.
 */
/* Fault in the mapping; on failure, release it and report the error. */
if (touch_anon((char *)buf, size)) {
munmap(buf, size); return -1;
}
/*
 * Stay alive until our parent changes (getppid() differs once the original
 * parent exits) — presumably the test kills/reaps us from outside.
 */
while (getppid() == ppid)
sleep(1);
munmap(buf, size); return 0;
}
/* * Create a child process that allocates and touches 100MB, then waits to be * killed. Wait until the child is attached to the cgroup, kill all processes * in that cgroup and wait until "cgroup.procs" is empty. At this point try to * destroy the empty cgroup. The test helps detect race conditions between * dying processes leaving the cgroup and cgroup destruction path.
*/ staticint test_cgcore_destroy(constchar *root)
{ int ret = KSFT_FAIL; char *cg_test = NULL; int child_pid; char buf[PAGE_SIZE];
cg_test = cg_name(root, "cg_test");
if (!cg_test) goto cleanup;
for (int i = 0; i < 10; i++) { if (cg_create(cg_test)) goto cleanup;
/* wait for the child to enter cgroup */ if (cg_wait_for_proc_count(cg_test, 1)) goto cleanup;
if (cg_killall(cg_test)) goto cleanup;
/* wait for cgroup to be empty */ while (1) { if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf))) goto cleanup; if (buf[0] == '\0') break;
usleep(1000);
}
if (rmdir(cg_test)) goto cleanup;
if (waitpid(child_pid, NULL, 0) < 0) goto cleanup;
}
ret = KSFT_PASS;
cleanup: if (cg_test)
cg_destroy(cg_test);
free(cg_test); return ret;
}
/* * A(0) - B(0) - C(1) * \ D(0) * * A, B and C's "populated" fields would be 1 while D's 0. * test that after the one process in C is moved to root, * A,B and C's "populated" fields would flip to "0" and file * modified events will be generated on the * "cgroup.events" files of both cgroups.
*/ staticint test_cgcore_populated(constchar *root)
{ int ret = KSFT_FAIL; int err; char *cg_test_a = NULL, *cg_test_b = NULL; char *cg_test_c = NULL, *cg_test_d = NULL; int cgroup_fd = -EBADF;
pid_t pid;
pid = clone_into_cgroup(cgroup_fd); if (pid < 0) goto cleanup_pass; if (pid == 0) exit(EXIT_SUCCESS);
(void)clone_reap(pid, WEXITED); goto cleanup;
cleanup_pass:
ret = KSFT_PASS;
cleanup: if (cg_test_d)
cg_destroy(cg_test_d); if (cg_test_c)
cg_destroy(cg_test_c); if (cg_test_b)
cg_destroy(cg_test_b); if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_d);
free(cg_test_c);
free(cg_test_b);
free(cg_test_a); if (cgroup_fd >= 0)
close(cgroup_fd); return ret;
}
/* * A (domain threaded) - B (threaded) - C (domain) * * test that C can't be used until it is turned into a * threaded cgroup. "cgroup.type" file will report "domain (invalid)" in * these cases. Operations which fail due to invalid topology use * EOPNOTSUPP as the errno.
*/ staticint test_cgcore_invalid_domain(constchar *root)
{ int ret = KSFT_FAIL; char *grandparent = NULL, *parent = NULL, *child = NULL;
if (cg_write(parent, "cgroup.type", "threaded")) goto cleanup;
if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n")) goto cleanup;
if (!cg_enter_current(child)) goto cleanup;
if (errno != EOPNOTSUPP) goto cleanup;
if (!clone_into_cgroup_run_wait(child)) goto cleanup;
if (errno == ENOSYS) goto cleanup_pass;
if (errno != EOPNOTSUPP) goto cleanup;
cleanup_pass:
ret = KSFT_PASS;
cleanup:
cg_enter_current(root); if (child)
cg_destroy(child); if (parent)
cg_destroy(parent); if (grandparent)
cg_destroy(grandparent);
free(child);
free(parent);
free(grandparent); return ret;
}
/* * Test that when a child becomes threaded * the parent type becomes domain threaded.
*/ staticint test_cgcore_parent_becomes_threaded(constchar *root)
{ int ret = KSFT_FAIL; char *parent = NULL, *child = NULL;
if (cg_write(child, "cgroup.type", "threaded")) goto cleanup;
if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n")) goto cleanup;
ret = KSFT_PASS;
cleanup: if (child)
cg_destroy(child); if (parent)
cg_destroy(parent);
free(child);
free(parent); return ret;
}
/* * Test that there's no internal process constrain on threaded cgroups. * You can add threads/processes on a parent with a controller enabled.
*/ staticint test_cgcore_no_internal_process_constraint_on_threads(constchar *root)
{ int ret = KSFT_FAIL; char *parent = NULL, *child = NULL;
if (cg_test_v1_named ||
cg_read_strstr(root, "cgroup.controllers", "cpu") ||
cg_write(root, "cgroup.subtree_control", "+cpu")) {
ret = KSFT_SKIP; goto cleanup;
}
if (cg_write(parent, "cgroup.type", "threaded")) goto cleanup;
if (cg_write(child, "cgroup.type", "threaded")) goto cleanup;
if (cg_write(parent, "cgroup.subtree_control", "+cpu")) goto cleanup;
if (cg_enter_current(parent)) goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
cg_enter_current(root); if (child)
cg_destroy(child); if (parent)
cg_destroy(parent);
free(child);
free(parent); return ret;
}
/* * Test that you can't enable a controller on a child if it's not enabled * on the parent.
*/ staticint test_cgcore_top_down_constraint_enable(constchar *root)
{ int ret = KSFT_FAIL; char *parent = NULL, *child = NULL;
if (!cg_write(child, "cgroup.subtree_control", "+memory")) goto cleanup;
ret = KSFT_PASS;
cleanup: if (child)
cg_destroy(child); if (parent)
cg_destroy(parent);
free(child);
free(parent); return ret;
}
/* * Test that you can't disable a controller on a parent * if it's enabled in a child.
*/ staticint test_cgcore_top_down_constraint_disable(constchar *root)
{ int ret = KSFT_FAIL; char *parent = NULL, *child = NULL;
if (cg_write(parent, "cgroup.subtree_control", "+memory")) goto cleanup;
if (cg_write(child, "cgroup.subtree_control", "+memory")) goto cleanup;
if (!cg_write(parent, "cgroup.subtree_control", "-memory")) goto cleanup;
ret = KSFT_PASS;
cleanup: if (child)
cg_destroy(child); if (parent)
cg_destroy(parent);
free(child);
free(parent); return ret;
}
/* * Test internal process constraint. * You can't add a pid to a domain parent if a controller is enabled.
 */ staticint test_cgcore_internal_process_constraint(constchar *root)
{ int ret = KSFT_FAIL; char *parent = NULL, *child = NULL;
/*
 * NOTE(review): the body of test_cgcore_internal_process_constraint is
 * missing here — the text jumps straight into the next test's header. The
 * fused tokens "staticint"/"constchar" also show this region was mangled;
 * restore both functions from the upstream selftest before building.
 */
/* * Test threadgroup migration. * All threads of a process are migrated together.
 */ staticint test_cgcore_proc_migration(constchar *root)
{ int ret = KSFT_FAIL; int t, c_threads = 0, n_threads = 13; char *src = NULL, *dst = NULL;
pthread_t threads[n_threads];
/* NOTE(review): test_cgcore_proc_migration's body is also truncated here. */
/*
 * NOTE(review): orphaned cleanup tail — it references grps[] and dom, which
 * belong to a thread-migration test whose opening is missing from this
 * chunk; restore the enclosing function from the upstream selftest.
 */
cleanup:
cg_enter_current(root); if (grps[2])
cg_destroy(grps[2]); if (grps[1])
cg_destroy(grps[1]); if (dom)
cg_destroy(dom);
free(grps[2]);
free(grps[1]);
free(dom); return ret;
}
/* * cgroup migration permission check should be performed based on the * credentials at the time of open instead of write.
 */ staticint test_cgcore_lesser_euid_open(constchar *root)
{ const uid_t test_euid = TEST_UID; int ret = KSFT_FAIL; char *cg_test_a = NULL, *cg_test_b = NULL; char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; int cg_test_b_procs_fd = -1;
uid_t saved_uid;
/*
 * NOTE(review): test_cgcore_lesser_euid_open's body ends here after the
 * declarations — the rest was lost in this chunk; restore from upstream.
 */
/* * cgroup migration permission check should be performed based on the cgroup * namespace at the time of open instead of write.
 */ staticint test_cgcore_lesser_ns_open(constchar *root)
{ staticchar stack[65536]; const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ int ret = KSFT_FAIL; char *cg_test_a = NULL, *cg_test_b = NULL; char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; int cg_test_b_procs_fd = -1; struct lesser_ns_open_thread_arg targ = { .fd = -1 };
pid_t pid; int status;
/*
 * NOTE(review): test_cgcore_lesser_ns_open's body is missing after the
 * declarations, and the function is never closed before main() below;
 * restore from the upstream selftest. The "tests[]" table main() iterates
 * over is also absent from this chunk.
 */
/*
 * Entry point: locate a usable cgroup hierarchy, ensure the memory
 * controller is enabled (cgroup2 only), then run every registered test and
 * report each result through the kselftest TAP helpers.
 */
int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	/*
	 * Prefer a cgroup2 unified root; fall back to a named v1 hierarchy,
	 * or skip the whole suite if neither can be set up.
	 */
	if (cg_find_unified_root(root, sizeof(root), &nsdelegate)) {
		if (setup_named_v1_root(root, sizeof(root), CG_NAMED_NAME))
			ksft_exit_skip("cgroup v2 isn't mounted and could not setup named v1 hierarchy\n");
		cg_test_v1_named = true;
		goto post_v2_setup;
	}

	/* Enable the memory controller in the subtree if it isn't already. */
	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

post_v2_setup:
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	/* Restored: the chunk was truncated before returning/closing main(). */
	return ret;
}
/*
 * NOTE(review): the following German-language website disclaimer is not part
 * of this C source file (it appears to have been appended accidentally
 * during extraction). It is preserved verbatim inside this comment so the
 * file remains compilable:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */