/* NOTE(review): fragment — this is the tail of a test function whose
 * opening lines (declarations of link, skel, len, iter_fd, buf and the
 * skeleton setup) are not visible in this chunk; presumably an
 * "*_iter_fd" test that closes everything while an iterator fd is still
 * open — TODO confirm against the full file.
 */
/* Close link and map fd prematurely */
bpf_link__destroy(link);
bpf_object__destroy_skeleton(*skel);
*skel = NULL;
/* Try to let map free work to run first if map is freed */
usleep(100); /* Memory used by both sock map and sock local storage map is
 * freed after two synchronize_rcu() calls, so wait for it
 */
kern_sync_rcu();
kern_sync_rcu();
/* Read after both map fd and link fd are closed; the read must still
 * drain cleanly without error */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
ASSERT_GE(len, 0, "read_iterator");
close(iter_fd);
}
/* Read from @fd into @buf until EOF, error, or @buf (of capacity @size)
 * is full.
 *
 * Returns the total number of bytes read on success, or the (negative)
 * value of the failing read() on error.
 */
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		/* read at most the remaining capacity; a 0-byte read
		 * (EOF or full buffer) terminates the loop
		 */
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}
/* NOTE(review): fragment — body of a task-iterator test; the function
 * header and declarations of linfo, opts and thread_id are missing from
 * this chunk.
 */
linfo.task.tid = 0;
linfo.task.pid = getpid(); /* The expected count of 3 includes the parent thread, this thread,
 * the watchdog timer thread and the do_nothing_wait thread
 */
test_task_common(&opts, 3, 1);
/* Create a new thread so pid and tid aren't the same */
ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL), "pthread_create");
ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
}
/* NOTE(review): garbled fragment — the test_task_pid() header below is
 * fused with the body of a different BTF-dump test (taskbuf, TASKBUFSZ,
 * bss->skip and ret are not declared here). 'staticvoid' is a mangled
 * 'static void'. Left byte-identical pending recovery of the missing
 * lines from the original file.
 */
staticvoid test_task_pid(void)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts); union bpf_iter_link_info linfo;
/* from here on the code reads like a do_btf_read()-style helper:
 * attach the iterator, read it into taskbuf, and check the BTF dump
 */
link = bpf_program__attach_iter(prog, NULL); if (!ASSERT_OK_PTR(link, "attach_iter")) return ret;
iter_fd = bpf_iter_create(bpf_link__fd(link)); if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link;
err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); if (bss->skip) {
printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
ret = 1;
test__skip(); goto free_link;
}
if (!ASSERT_GE(err, 0, "read")) goto free_link;
ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)", "check for btf representation of task_struct in iter data");
free_link: if (iter_fd > 0)
close(iter_fd);
bpf_link__destroy(link); return ret;
}
/* NOTE(review): fragment — body of a pinned ("file based") iterator
 * test; the function header, declarations (skel1, skel2, link, path,
 * err) and the unlink_path/destroy_skel2/free_link/out cleanup labels
 * referenced below are missing from this chunk.
 */
skel1 = bpf_iter_test_kern1__open_and_load(); if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load")) return;
link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); if (!ASSERT_OK_PTR(link, "attach_iter")) goto out;
/* unlink this path if it exists. */
unlink(path);
err = bpf_link__pin(link, path); if (!ASSERT_OK(err, "pin_iter")) goto free_link;
err = do_read(path, "abcd"); if (err) goto unlink_path;
/* The file based iterator seems to be working fine. Let us do a link
 * update of the underlying link and `cat` the iterator again; its
 * content should change.
 */
skel2 = bpf_iter_test_kern2__open_and_load(); if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load")) goto unlink_path;
err = bpf_link__update_program(link, skel2->progs.dump_task); if (!ASSERT_OK(err, "update_prog")) goto destroy_skel2;
/* NOTE(review): fragment — setup portion of an overflow test; the
 * function header, declarations (skel, map1_fd, map2_fd, iter_size)
 * and the rest of the body after iter_size are missing from this chunk.
 */
skel = bpf_iter_test_kern4__open(); if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open")) return;
/* create two maps: the bpf program will only do bpf_seq_write for
 * these two maps. The goal is that one map's output almost fills the
 * seq_file buffer and then the other triggers overflow and needs a
 * restart.
 */
map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); if (!ASSERT_GE(map1_fd, 0, "bpf_map_create")) goto out;
map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); if (!ASSERT_GE(map2_fd, 0, "bpf_map_create")) goto free_map1;
/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
 * bpf_seq_write will mostly fill it, and the other map will partially
 * fill it and then trigger overflow, needing a bpf_seq_read restart.
 */
iter_size = sysconf(_SC_PAGE_SIZE) << 3;
staticvoid test_bpf_hash_map(void)
{
__u32 expected_key_a = 0, expected_key_b = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); struct bpf_iter_bpf_hash_map *skel; int err, i, len, map_fd, iter_fd; union bpf_iter_link_info linfo;
__u64 val, expected_val = 0; struct bpf_link *link; struct key_t { int a; int b; int c;
} key; char buf[64];
skel = bpf_iter_bpf_hash_map__open(); if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open")) return;
skel->bss->in_test_mode = true;
err = bpf_iter_bpf_hash_map__load(skel); if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load")) goto out;
/* iterator with hashmap2 and hashmap3 should fail */
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); if (!ASSERT_ERR_PTR(link, "attach_iter")) goto out;
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); if (!ASSERT_ERR_PTR(link, "attach_iter")) goto out;
/* hashmap1 should be good, update map values here */
map_fd = bpf_map__fd(skel->maps.hashmap1); for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
key.a = i + 1;
key.b = i + 2;
key.c = i + 3;
val = i + 4;
expected_key_a += key.a;
expected_key_b += key.b;
expected_val += val;
/* Sleepable program is prohibited for hash map iterator */
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts); if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter")) goto out;
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); if (!ASSERT_OK_PTR(link, "attach_iter")) goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link)); if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link;
/* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
; if (!ASSERT_GE(len, 0, "read")) goto close_iter;
/* test results */ if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) goto close_iter; if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) goto close_iter; if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter;
/* NOTE(review): fragment — interior of an array-map iterator test; the
 * function header, declarations (skel, map_fd, i, val, expected_key,
 * expected_val) and the rest of the fill loop (presumably a
 * bpf_map_update_elem call and closing brace — TODO confirm) are
 * missing from this chunk.
 */
skel = bpf_iter_bpf_array_map__open_and_load(); if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) return;
map_fd = bpf_map__fd(skel->maps.arraymap1); for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
val = i + 4;
expected_key += i;
expected_val += val;
/* An iterator program deletes all local storage in a map. */ staticvoid test_bpf_sk_storage_delete(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); struct bpf_iter_bpf_sk_storage_helpers *skel; union bpf_iter_link_info linfo; int err, len, map_fd, iter_fd; struct bpf_link *link; int sock_fd = -1;
__u32 val = 42; char buf[64];
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) return;
iter_fd = bpf_iter_create(bpf_link__fd(link)); if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link;
/* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
; if (!ASSERT_GE(len, 0, "read")) goto close_iter;
/* test results */
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
/* Note: The following assertions serve to ensure * the value was deleted. It does so by asserting * that bpf_map_lookup_elem has failed. This might * seem counterintuitive at first.
*/
ASSERT_ERR(err, "bpf_map_lookup_elem");
ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");
/* This creates a socket and its local storage. It then runs a task_iter
 * BPF program that replaces the existing socket local storage with the
 * tgid of the only task owning a file descriptor to this socket, this
 * process, prog_tests. It then runs a tcp socket iterator that negates
 * the value in the existing socket local storage; the test verifies
 * that the resulting value is -pid.
 *
 * NOTE(review): garbled fragment — the test_bpf_sk_storage_get() header
 * below is fused with the body of a different, fork-based task-vma
 * test (start_tm/cur_tm/child_pid/wait_sec/iter_fd are not declared
 * here). Left byte-identical pending recovery of the missing lines.
 */ staticvoid test_bpf_sk_storage_get(void)
{ struct bpf_iter_bpf_sk_storage_helpers *skel; int err, map_fd, val = -1; int sock_fd = -1;
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) return;
if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
skel->links.proc_maps = NULL; goto out;
}
start_tm = time(NULL);
cur_tm = start_tm;
child_pid = fork(); if (child_pid == 0) { /* Fork short-lived processes in the background. */ while (cur_tm < start_tm + wait_sec) {
system("echo > /dev/null");
cur_tm = time(NULL);
} exit(0);
}
if (!ASSERT_GE(child_pid, 0, "fork_child")) goto out;
while (cur_tm < start_tm + wait_sec) {
iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps)); if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto out;
/* Drain all data from iter_fd. */ while (cur_tm < start_tm + wait_sec) {
err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE); if (!ASSERT_GE(err, 0, "read_iter_fd")) goto out;
/* NOTE(review): fragment — subtest dispatch body of the top-level
 * test entry point (presumably `void test_bpf_iter(void)` — the header
 * is missing from this chunk). Each guarded call registers and runs
 * one named subtest.
 */
if (test__start_subtest("btf_id_or_null"))
test_btf_id_or_null(); if (test__start_subtest("ipv6_route"))
test_ipv6_route(); if (test__start_subtest("netlink"))
test_netlink(); if (test__start_subtest("bpf_map"))
test_bpf_map(); if (test__start_subtest("task_tid"))
test_task_tid(); if (test__start_subtest("task_pid"))
test_task_pid(); if (test__start_subtest("task_pidfd"))
test_task_pidfd(); if (test__start_subtest("task_sleepable"))
test_task_sleepable(); if (test__start_subtest("task_stack"))
test_task_stack(); if (test__start_subtest("task_file"))
test_task_file(); if (test__start_subtest("task_vma"))
test_task_vma(); if (test__start_subtest("task_vma_dead_task"))
test_task_vma_dead_task(); if (test__start_subtest("task_btf"))
test_task_btf(); if (test__start_subtest("tcp4"))
test_tcp4(); if (test__start_subtest("tcp6"))
test_tcp6(); if (test__start_subtest("udp4"))
test_udp4(); if (test__start_subtest("udp6"))
test_udp6(); if (test__start_subtest("unix"))
test_unix(); if (test__start_subtest("anon"))
test_anon_iter(false); if (test__start_subtest("anon-read-one-char"))
test_anon_iter(true); if (test__start_subtest("file"))
test_file_iter(); if (test__start_subtest("overflow"))
test_overflow(false, false); if (test__start_subtest("overflow-e2big"))
test_overflow(true, false); if (test__start_subtest("prog-ret-1"))
test_overflow(false, true); if (test__start_subtest("bpf_hash_map"))
test_bpf_hash_map(); if (test__start_subtest("bpf_percpu_hash_map"))
test_bpf_percpu_hash_map(); if (test__start_subtest("bpf_array_map"))
test_bpf_array_map(); if (test__start_subtest("bpf_array_map_iter_fd"))
test_bpf_array_map_iter_fd(); if (test__start_subtest("bpf_percpu_array_map"))
test_bpf_percpu_array_map(); if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map(); if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
test_bpf_sk_storage_map_iter_fd(); if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete(); if (test__start_subtest("bpf_sk_storage_get"))
test_bpf_sk_storage_get(); if (test__start_subtest("rdonly-buf-out-of-bound"))
test_rdonly_buf_out_of_bound(); if (test__start_subtest("buf-neg-offset"))
test_buf_neg_offset(); if (test__start_subtest("link-iter"))
test_link_iter(); if (test__start_subtest("ksym"))
test_ksym_iter(); if (test__start_subtest("bpf_sockmap_map_iter_fd"))
test_bpf_sockmap_map_iter_fd(); if (test__start_subtest("vma_offset"))
test_task_vma_offset();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.