/* test_tailcall_1 checks basic functionality by patching multiple locations * in a single program for a single tail call slot with nop->jmp, jmp->nop * and jmp->jmp rewrites. Also checks for nop->nop.
*/ staticvoid test_tailcall_1(void)
{ int err, map_fd, prog_fd, main_fd, i, j; struct bpf_map *prog_array; struct bpf_program *prog; struct bpf_object *obj; char prog_name[32]; char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd); if (CHECK_FAIL(err)) return;
prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out;
main_fd = bpf_program__fd(prog); if (CHECK_FAIL(main_fd < 0)) goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); if (CHECK_FAIL(!prog_array)) goto out;
map_fd = bpf_map__fd(prog_array); if (CHECK_FAIL(map_fd < 0)) goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out;
prog_fd = bpf_program__fd(prog); if (CHECK_FAIL(prog_fd < 0)) goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out;
}
/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}
/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}
/* test_tailcall_4 checks that the kernel properly selects indirect jump * for the case where the key is not known. Latter is passed via global * data to select different targets we can compare return value of.
*/ staticvoid test_tailcall_4(void)
{ int err, map_fd, prog_fd, main_fd, data_fd, i; struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; staticconstint zero = 0; char buff[128] = {}; char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
.data_size_in = sizeof(buff),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd); if (CHECK_FAIL(err)) return;
prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out;
main_fd = bpf_program__fd(prog); if (CHECK_FAIL(main_fd < 0)) goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); if (CHECK_FAIL(!prog_array)) goto out;
map_fd = bpf_map__fd(prog_array); if (CHECK_FAIL(map_fd < 0)) goto out;
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) goto out;
data_fd = bpf_map__fd(data_map); if (CHECK_FAIL(data_fd < 0)) goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out;
prog_fd = bpf_program__fd(prog); if (CHECK_FAIL(prog_fd < 0)) goto out;
/* make sure that subprog can access ctx and entry prog that * called this subprog can properly return
*/
i = 0;
err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out;
/* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to * 256 bytes) can be used within bpf subprograms that have the tailcalls * in them
*/ staticvoid test_tailcall_bpf2bpf_3(void)
{ int err, map_fd, prog_fd, main_fd, i; struct bpf_map *prog_array; struct bpf_program *prog; struct bpf_object *obj; char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd); if (CHECK_FAIL(err)) return;
prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out;
main_fd = bpf_program__fd(prog); if (CHECK_FAIL(main_fd < 0)) goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); if (CHECK_FAIL(!prog_array)) goto out;
map_fd = bpf_map__fd(prog_array); if (CHECK_FAIL(map_fd < 0)) goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out;
prog_fd = bpf_program__fd(prog); if (CHECK_FAIL(prog_fd < 0)) goto out;
/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved * across tailcalls combined with bpf2bpf calls. for making sure that tailcall * counter behaves correctly, bpf program will go through following flow: * * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 -> * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 -> * subprog2 [here bump global counter] --------^ * * We go through first two tailcalls and start counting from the subprog2 where * the loop begins. At the end of the test make sure that the global counter is * equal to 31, because tailcall counter includes the first two tailcalls * whereas global counter is incremented only on loop presented on flow above. * * The noise parameter is used to insert bpf_map_update calls into the logic * to force verifier to patch instructions. This allows us to ensure jump * logic remains correct with instruction movement.
*/ staticvoid test_tailcall_bpf2bpf_4(bool noise)
{ int err, map_fd, prog_fd, main_fd, data_fd, i; struct tailcall_bpf2bpf4__bss val; struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; char prog_name[32];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd); if (CHECK_FAIL(err)) return;
prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out;
main_fd = bpf_program__fd(prog); if (CHECK_FAIL(main_fd < 0)) goto out;
prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); if (CHECK_FAIL(!prog_array)) goto out;
map_fd = bpf_map__fd(prog_array); if (CHECK_FAIL(map_fd < 0)) goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out;
prog_fd = bpf_program__fd(prog); if (CHECK_FAIL(prog_fd < 0)) goto out;
/* Tail call counting works even when there is data on stack which is * not aligned to 8 bytes.
*/ staticvoid test_tailcall_bpf2bpf_6(void)
{ struct tailcall_bpf2bpf6 *obj; int err, map_fd, prog_fd, main_fd, data_fd, i, val;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
obj = tailcall_bpf2bpf6__open_and_load(); if (!ASSERT_OK_PTR(obj, "open and load")) return;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "bss map lookup");
ASSERT_EQ(val, 1, "done flag is set");
out:
tailcall_bpf2bpf6__destroy(obj);
}
/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}
/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}
/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}
/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail * call limit enforcement matches with expectations when tailcall is preceded * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
*/ staticvoid test_tailcall_bpf2bpf_fentry_entry(void)
{ struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL; int err, map_fd, prog_fd, data_fd, i, val; struct bpf_map *prog_array, *data_map; struct bpf_link *fentry_link = NULL; struct bpf_program *prog; char buff[128] = {};
/* * We are trying to hit prog array update during another program load * that shares the same prog array map. * * For that we share the jmp_table map between two skeleton instances * by pinning the jmp_table to same path. Then first skeleton instance * periodically updates jmp_table in 'poke update' thread while we load * the second skeleton instance in the main thread.
*/ staticvoid test_tailcall_poke(void)
{ struct tailcall_poke *call, *test; int err, cnt = 10;
pthread_t thread;
unlink(JMP_TABLE);
call = tailcall_poke__open_and_load(); if (!ASSERT_OK_PTR(call, "tailcall_poke__open")) return;
err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE); if (!ASSERT_OK(err, "bpf_map__pin")) goto out;
err = pthread_create(&thread, NULL, poke_update, call); if (!ASSERT_OK(err, "new toggler")) goto out;
while (cnt--) {
test = tailcall_poke__open(); if (!ASSERT_OK_PTR(test, "tailcall_poke__open")) break;
/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcalls are preceded
 * with two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}
/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}
/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcalls are preceded
 * with two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}
/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
 * the tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}
/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
 * the tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}
/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches with expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}
/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches with expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}
/* test_tailcall_freplace checks that the freplace prog fails to update the * prog_array map, no matter whether the freplace prog attaches to its target.
*/ staticvoid test_tailcall_freplace(void)
{ struct tailcall_freplace *freplace_skel = NULL; struct bpf_link *freplace_link = NULL; struct bpf_program *freplace_prog; struct tc_bpf2bpf *tc_skel = NULL; int prog_fd, tc_prog_fd, map_fd; char buff[128] = {}; int err, key;
/* Top-level entry for the tailcall test group: registers each scenario as a
 * subtest so it can be selected/filtered individually by the test_progs
 * framework. The hierarchy_2/3 cases are invoked unconditionally because
 * RUN_TESTS performs its own per-program subtest registration.
 */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
		test_tailcall_bpf2bpf_freplace();
	if (test__start_subtest("tailcall_failure"))
		test_tailcall_failure();
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.