	/* we need one copy of events per cpu for reading */
	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.events, map_size);

	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);

	/* previous result is saved in a per-cpu array */
	map_size = evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);

	/* cgroup result needs all events (per-cpu) */
	map_size = evlist->core.nr_entries;
	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
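	/*
	 * Illustrative sizing (hypothetical numbers): the evlist holds one
	 * entry per event per cgroup, so nr_entries / nr_cgroups is the
	 * number of unique events.  With 4 CPUs, 2 cgroups and 6 evlist
	 * entries (3 unique events), "events" gets 4 * 6 / 2 = 12 per-cpu
	 * slots, "prev_readings" gets 6 / 2 = 3, and "cgrp_readings"
	 * keeps all 6 per-cgroup results.
	 */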
	set_max_rlimit();

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}
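	/*
	 * Attach the on_cgrp_switch program to the cgroup-switches
	 * software event on every CPU, so the counters are updated
	 * whenever a task from a different cgroup is scheduled in.
	 */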
	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}
	/*
	 * Update cgrp_idx map from cgroup-id to event index.
	 */
	cgrp = NULL;
	i = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
			cgrp->id = 0;
		}

		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}
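	/*
	 * Each distinct cgroup gets the next consecutive index; the BPF
	 * program looks this index up by cgroup id to find the cgroup's
	 * slots in the cgrp_readings map.
	 */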
	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}
out:
	return err;
}

static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}
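/*
 * bperf_load_program() already set up everything the BPF side needs,
 * so there is nothing to install when the core counter code hands
 * over an event fd.
 */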
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu_map_idx __maybe_unused,
				  int fd __maybe_unused)
{
	/* nothing to do */
	return 0;
}
/*
 * trigger the leader prog on each cpu, so the cgrp_readings map could get
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	struct perf_cpu cpu;
	int idx;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
		bperf_trigger_reading(prog_fd, cpu.cpu);

	return 0;
}
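/*
 * Note: bperf_trigger_reading() (shared bperf helper) invokes the
 * trigger_read program on the given CPU via BPF_PROG_TEST_RUN, which
 * makes the program snapshot the current counter values on that CPU.
 */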