/* Fill every slot of the hashmap before the benchmark runs.
 *
 * local_storage gets will have BPF_LOCAL_STORAGE_GET_F_CREATE flag set, so
 * populate the hashmap for a similar comparison: hashmap lookups should also
 * always find an existing element.
 *
 * @fd: file descriptor of the hashmap to populate
 *
 * Exits the process on any bpf_map_update_elem failure.
 */
static void prepopulate_hashmap(int fd)
{
	int i, key, val;

	for (i = 0; i < HASHMAP_SZ; i++) {
		key = val = i;
		if (bpf_map_update_elem(fd, &key, &val, 0)) {
			fprintf(stderr, "Error prepopulating hashmap (key %d)\n", key);
			exit(1);
		}
	}
}
staticvoid __setup(struct bpf_program *prog, bool hashmap)
{ struct bpf_map *inner_map; int i, fd, mim_fd, err;
LIBBPF_OPTS(bpf_map_create_opts, create_opts);
if (!hashmap)
create_opts.map_flags = BPF_F_NO_PREALLOC;
/* Producer thread body for the benchmark framework: trigger the BPF
 * program in a tight loop for the duration of the run.
 *
 * @input: unused (required by the pthread-style producer signature)
 *
 * Never returns normally; the benchmark harness tears the thread down.
 */
static void *producer(void *input)
{
	while (true)
		trigger_bpf_program();

	return NULL;
}
/* cache sequential and interleaved get benchs test local_storage get * performance, specifically they demonstrate performance cliff of * current list-plus-cache local_storage model. * * cache sequential get: call bpf_task_storage_get on n maps in order * cache interleaved get: like "sequential get", but interleave 4 calls to the * 'important' map (idx 0 in array_of_maps) for every 10 calls. Goal * is to mimic environment where many progs are accessing their local_storage * maps, with 'our' prog needing to access its map more often than others
*/ conststruct bench bench_local_storage_cache_seq_get = {
.name = "local-storage-cache-seq-get",
.argp = &bench_local_storage_argp,
.validate = validate,
.setup = local_storage_cache_get_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
};
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.