/*
 * Command line option to not fork, running the test in the same process
 * and making it easier to debug.
 */
static bool dont_fork;
/* Fork the tests in parallel and wait for their completion. */
static bool sequential;
/* Number of times each test is run. */
static unsigned int runs_per_test = 1;
/* DSO to exercise in DSO-related tests; NULL means use the default. */
const char *dso_to_test;
/* objdump binary used for disassembly/annotation tests. */
const char *test_objdump_path = "objdump";
/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
/* Architectures without specific tests get an empty, NULL-terminated list. */
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif
/*
 * NOTE(review): fragment — the start of the enclosing function is not
 * visible in this chunk; presumably the tail of the test runner's error
 * path. Verify against the complete file.
 */
check_leaks();
err_out:
/* Flush all stdio streams, then restore default handlers for every signal
 * the runner had intercepted before returning. */
fflush(NULL); for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
signal(signals[i], SIG_DFL); return -err;
}
/* Sentinel result value meaning "test still in flight" for progress output. */
#define TEST_RUNNING -3
/*
 * Print one test's status line.
 *
 * NOTE(review): truncated fragment — the body continues beyond this chunk.
 * "staticint" appears to be whitespace lost in extraction ("static int");
 * confirm against the original file.
 */
staticint print_test_result(struct test_suite *t, int curr_suite, int curr_test_case, int result, int width, int running)
{ if (test_suite__num_test_cases(t) > 1) { int subw = width > 2 ? width - 2 : width;
/*
 * Wait for and reap one running child test, streaming its captured stderr
 * into a buffer that is dumped when verbose or on failure.
 *
 * NOTE(review): extraction-mangled fragment — physical lines were fused
 * (e.g. "staticvoid" for "static void") and the function's closing brace
 * and tail (final result print, buffer release) are not visible in this
 * chunk. Confirm against the complete file before relying on this text.
 */
staticvoid finish_test(struct child_test **child_tests, int running_test, int child_test_num, int width)
{ struct child_test *child_test = child_tests[running_test]; struct test_suite *t; int curr_suite, curr_test_case, err; bool err_done = false; struct strbuf err_output = STRBUF_INIT; int last_running = -1; int ret;
/* A NULL slot means this test was never started; nothing to reap. */
if (child_test == NULL) { /* Test wasn't started. */ return;
}
t = child_test->test;
curr_suite = child_test->suite_num;
curr_test_case = child_test->test_case_num;
err = child_test->process.err; /* * For test suites with subtests, display the suite name ahead of the * sub test names.
*/ if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));
/* * Busy loop reading from the child's stdout/stderr that are set to be * non-blocking until EOF.
*/ if (err > 0)
/* Make the child's stderr fd non-blocking so the reads below never stall. */
fcntl(err, F_SETFL, O_NONBLOCK); if (verbose > 1) { if (test_suite__num_test_cases(t) > 1)
pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
test_description(t, curr_test_case)); else
pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
} while (!err_done) { struct pollfd pfds[1] = {
{ .fd = err,
.events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
},
}; if (perf_use_color_default) { int running = 0;
/* Count children still running so the status line can be refreshed. */
for (int y = running_test; y < child_test_num; y++) { if (child_tests[y] == NULL) continue; if (check_if_command_finished(&child_tests[y]->process) == 0)
running++;
} if (running != last_running) { if (last_running != -1) { /* * Erase "Running (.. active)" line * printed before poll/sleep.
*/
fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
}
print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
width, running);
last_running = running;
}
}
err_done = true; if (err <= 0) { /* No child stderr to poll, sleep for 10ms for child to complete. */
usleep(10 * 1000);
} else { /* Poll to avoid excessive spinning, timeout set for 100ms. */
poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100); if (pfds[0].revents) { char buf[512];
ssize_t len;
/* Drain available stderr; a short/failed read just retries the loop. */
len = read(err, buf, sizeof(buf) - 1);
if (len > 0) {
err_done = false;
buf[len] = '\0';
strbuf_addstr(&err_output, buf);
}
}
} if (err_done)
/* Only stop looping once the child process itself has exited. */
err_done = check_if_command_finished(&child_test->process);
} if (perf_use_color_default && last_running != -1) { /* Erase "Running (.. active)" line printed before poll/sleep. */
fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
} /* Clean up child process. */
/* Reap the child; dump its captured stderr when verbose or on failure. */
ret = finish_command(&child_test->process); if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
fprintf(stderr, "%s", err_output.buf);
/*
 * Top-level test driver: size the output column from the longest test
 * description, allocate per-test bookkeeping, install a signal bail-out
 * via sigsetjmp, then run the suites over two passes.
 *
 * NOTE(review): extraction-mangled, incomplete fragment — tokens are fused
 * ("staticint", "constchar") and large parts of the body (the signal kill
 * loop, the main run loop, cleanup) are missing from this chunk.
 */
staticint __cmd_test(struct test_suite **suites, int argc, constchar *argv[], struct intlist *skiplist)
{ staticint width = 0; int err = 0;
/* The widest test description determines the status column width. */
for (struct test_suite **t = suites; *t; t++) { int i, len = strlen(test_description(*t, -1));
if (width < len)
width = len;
test_suite__for_each_test_case(*t, i) {
len = strlen(test_description(*t, i)); if (width < len)
width = len;
/* Each test case is counted once per requested repetition (-r). */
num_tests += runs_per_test;
}
}
child_tests = calloc(num_tests, sizeof(*child_tests)); if (!child_tests) return -ENOMEM;
/* On a caught signal, execution jumps back here with err = signal number. */
err = sigsetjmp(cmd_test_jmp_buf, 1); if (err) {
pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
err); for (size_t x = 0; x < num_tests; x++) { struct child_test *child_test = child_tests[x];
if (!child_test || child_test->process.pid <= 0) continue;
/* * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2 * runs the exclusive tests sequentially. In other modes all tests are * run in pass 1.
*/ for (int pass = 1; pass <= 2; pass++) { int child_test_num = 0; int curr_suite = 0;
for (struct test_suite **t = suites; *t; t++, curr_suite++) { int curr_test_case; bool suite_matched = false;
if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) { /* * Test suite shouldn't be run based on * description. See if any test case should.
*/ bool skip = true;
/*
 * "perf test" subcommand entry point: declare the options, build the skip
 * list, bump RLIMIT_MEMLOCK, then (beyond this chunk) run the suites.
 *
 * NOTE(review): truncated, extraction-mangled fragment — "constchar" is
 * fused "const char", the usage string below appears to have lost its
 * angle-bracket placeholders, and the body is cut off after
 * rlimit__bump_memlock(). Confirm against the complete file.
 */
int cmd_test(int argc, constchar **argv)
{ constchar *test_usage[] = { "perf test [] [{list |[|]}]",
NULL,
}; constchar *skip = NULL; constchar *workload = NULL; bool list_workloads = false; conststruct option test_options[] = {
OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('F', "dont-fork", &dont_fork, "Do not fork for testcase"),
OPT_BOOLEAN('S', "sequential", &sequential, "Run the tests one after another rather than in parallel"),
OPT_UINTEGER('r', "runs-per-test", &runs_per_test, "Run each test the given number of times, default 1"),
OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
OPT_STRING(0, "objdump", &test_objdump_path, "path", "objdump binary to use for disassembly and annotations"),
OPT_END()
}; constchar * const test_subcommands[] = { "list", NULL }; struct intlist *skiplist = NULL; int ret = hists__init(); struct test_suite **suites;
if (skip != NULL)
/* -s presumably takes a comma-separated list of test ids — intlist__new
 * parses it into an integer list; verify against its definition. */
skiplist = intlist__new(skip); /* * Tests that create BPF maps, for instance, need more than the 64K * default:
*/
rlimit__bump_memlock();
/*
 * NOTE(review): the following text does not belong in this source file and
 * appears to be accidentally pasted website boilerplate (translated from
 * German): "The information on this web page has been compiled carefully
 * and to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental." It should be removed from the file.
 */