ret = bpf_map_lookup_and_delete_batch(map_fd,
&in_batch, &out_batch,
keys, values, &count,
NULL);
/* * Despite what uapi header says, lookup_and_delete_batch will return * -ENOENT in case we successfully have deleted all elements, so check * this separately
*/
CHECK(ret < 0 && (errno != ENOENT || !count), "bpf_map_lookup_and_delete_batch", "error: %s\n", strerror(errno));
CHECK(count != save_count, "bpf_map_lookup_and_delete_batch", "deleted not all elements: removed=%u expected=%u\n",
count, save_count);
}
/* Remove every element currently present in the map.
 *
 * @type:   map type (BPF_MAP_TYPE_*); HASH_OF_MAPS only supports plain delete
 * @map_fd: fd of the map under test
 * @batch:  if true, delete via the batched lookup-and-delete API,
 *          otherwise delete elements one at a time
 *
 * All keys are snapshotted first, because deleting elements while
 * iterating with bpf_map_get_next_key() is unreliable.
 */
static void delete_all_elements(__u32 type, int map_fd, bool batch)
{
	static __u8 val[8 << 10]; /* enough for 1024 CPUs */
	__u32 key = -1;
	void *keys;
	__u32 i, n;
	int ret;

	keys = calloc(MAX_MAP_KEY_SIZE, MAX_ENTRIES);
	CHECK(!keys, "calloc", "error: %s\n", strerror(errno));

	/* Collect all keys currently in the map into the keys buffer */
	for (n = 0; !bpf_map_get_next_key(map_fd, &key, keys + n*MAX_MAP_KEY_SIZE); n++)
		key = *(__u32 *)(keys + n*MAX_MAP_KEY_SIZE);

	if (batch) {
		/* Can't mix delete_batch and delete_and_lookup_batch because
		 * they have different semantics in relation to the keys
		 * argument. However, delete_batch utilizes map_delete_elem,
		 * so we actually test it in non-batch scenario */
		delete_and_lookup_batch(map_fd, keys, n);
	} else {
		/* Intentionally mix delete and lookup_and_delete so we can
		 * test both */
		for (i = 0; i < n; i++) {
			void *keyp = keys + i*MAX_MAP_KEY_SIZE;

			if (i % 2 || type == BPF_MAP_TYPE_HASH_OF_MAPS) {
				ret = bpf_map_delete_elem(map_fd, keyp);
				CHECK(ret < 0, "bpf_map_delete_elem",
				      "error: key %u: %s\n", i, strerror(errno));
			} else {
				ret = bpf_map_lookup_and_delete_elem(map_fd, keyp, val);
				CHECK(ret < 0, "bpf_map_lookup_and_delete_elem",
				      "error: key %u: %s\n", i, strerror(errno));
			}
		}
	}

	free(keys);
}
staticvoid *patch_map_thread(void *arg)
{ /* 8KB is enough for 1024 CPUs. And it is shared between N_THREADS. */ static __u8 blob[8 << 10]; struct upsert_opts *opts = arg; void *val_ptr; int val; int ret; int i;
for (i = 0; i < opts->n; i++) { if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
val = create_small_hash();
val_ptr = &val;
} elseif (is_percpu(opts->map_type)) {
val_ptr = blob;
} else {
val = rand();
val_ptr = &val;
}
/* 2 seconds may be enough ? */ if (opts->retry_for_nomem)
ret = map_update_retriable(opts->map_fd, &i, val_ptr, 0,
40, retry_for_nomem_fn); else
ret = bpf_map_update_elem(opts->map_fd, &i, val_ptr, 0);
CHECK(ret < 0, "bpf_map_update_elem", "key=%d error: %s\n", i, strerror(errno));
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
close(val);
} return NULL;
}
/* Spawn N_THREADS threads all running patch_map_thread() over the same
 * opts, then join them all.  CHECK aborts the test on any pthread error
 * (pthread functions return the error code directly, not via errno).
 *
 * @opts: shared options describing the target map and key range
 */
static void upsert_elements(struct upsert_opts *opts)
{
	pthread_t threads[N_THREADS];
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++) {
		ret = pthread_create(&threads[i], NULL, patch_map_thread, opts);
		CHECK(ret != 0, "pthread_create", "error: %s\n", strerror(ret));
	}

	for (i = 0; i < ARRAY_SIZE(threads); i++) {
		ret = pthread_join(threads[i], NULL);
		CHECK(ret != 0, "pthread_join", "error: %s\n", strerror(ret));
	}
}
/* Count the current number of elements in the map by iterating through * all the map keys via bpf_get_next_key
*/
n_real = map_count_elements(info->type, map_fd);
/* The "real" number of elements should be the same as the inserted * number of elements in all cases except LRU maps, where some elements * may have been evicted
*/ if (n_inserted == 0 || !is_lru(info->type))
CHECK(n_inserted != n_real, "map_count_elements", "n_real(%u) != n_inserted(%u)\n", n_real, n_inserted);
/* Count the current number of elements in the map using an iterator */
n_iter = get_cur_elements(info->id);
/* Both counts should be the same, as all updates are over */
CHECK(n_iter != n_real, "get_cur_elements", "n_iter=%u, expected %u (map_type=%s,map_flags=%08x)\n",
n_iter, n_real, map_type_to_s(info->type), info->map_flags);
}
/* Reduce the number of elements we are updating such that we don't * bump into -E2BIG from non-preallocated hash maps, but still will
* have some evictions for LRU maps */ if (opts.map_type != BPF_MAP_TYPE_HASH_OF_MAPS)
opts.n -= 512; else
opts.n /= 2;
/* per-cpu bpf memory allocator may not be able to allocate per-cpu * pointer successfully and it can not refill free llist timely, and * bpf_map_update_elem() will return -ENOMEM. so just retry to mitigate * the problem temporarily.
*/
opts.retry_for_nomem = is_percpu(opts.map_type) && (info.map_flags & BPF_F_NO_PREALLOC);
/* * Upsert keys [0, n) under some competition: with random values from * N_THREADS threads. Check values, then delete all elements and check * values again.
*/
upsert_elements(&opts);
check_expected_number_elements(opts.n, map_fd, &info);
delete_all_elements(info.type, map_fd, !BATCH);
check_expected_number_elements(0, map_fd, &info);
/* Now do the same, but using batch delete operations */
upsert_elements(&opts);
check_expected_number_elements(opts.n, map_fd, &info);
delete_all_elements(info.type, map_fd, BATCH);
check_expected_number_elements(0, map_fd, &info);
close(map_fd);
}
staticint map_create_opts(__u32 type, constchar *name, struct bpf_map_create_opts *map_opts,
__u32 key_size, __u32 val_size)
{ int max_entries; int map_fd;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.