// Both constants are arbitrary upper bounds; they must be compile-time
// constants because they size a stack array and bound verifier loops.
// BUG FIX: the two #defines had been fused onto one line, so MAX_EVENTS
// ended up inside MAX_LEVELS' trailing comment and was never defined.
#define MAX_LEVELS  10  // max cgroup hierarchy level: arbitrary
#define MAX_EVENTS  32  // max events per cgroup: arbitrary
// NOTE: many of the maps and global data will be modified before loading
// from userspace (the perf tool) using the skeleton helpers.
// single set of global perf events to measure struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(int));
__uint(max_entries, 1);
} events SEC(".maps");
// from cgroup id to event index struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(__u64));
__uint(value_size, sizeof(__u32));
__uint(max_entries, 1);
} cgrp_idx SEC(".maps");
// aggregated event values for each cgroup (per-cpu) // will be read from the user-space struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_perf_event_value));
} cgrp_readings SEC(".maps");
/*
 * new kernel cgroup definition: newer kernels expose the ancestor chain
 * as an array of struct cgroup pointers.  preserve_access_index lets
 * CO-RE relocate field offsets against the running kernel's layout.
 */
struct cgroup___new {
	int level;
	struct cgroup *ancestors[];
} __attribute__((preserve_access_index));
/*
 * old kernel cgroup definition: older kernels expose ancestors as a
 * flat array of u64 ids instead of struct pointers.  CO-RE picks the
 * matching layout at load time (see get_cgroup_v1_ancestor_id).
 */
struct cgroup___old {
	int level;
	u64 ancestor_ids[];
} __attribute__((preserve_access_index));
staticinline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
{ /* recast pointer to capture new type for compiler */ struct cgroup___new *cgrp_new = (void *)cgrp;
if (bpf_core_field_exists(cgrp_new->ancestors)) { return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
} else { /* recast pointer to capture old type for compiler */ struct cgroup___old *cgrp_old = (void *)cgrp;
for (cnt = 0; i < MAX_LEVELS; i++) {
__u64 cgrp_id;
if (i > level) break;
// convert cgroup-id to a map index
cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id); if (!elem) continue;
cgrps[cnt++] = *elem; if (cnt == size) break;
}
return cnt;
}
// Fill cgrps[] with map indices for the current task's cgroup-v2
// ancestors; returns the number of entries written (at most size).
// BUG FIX: `staticinline` and `registerint` were fused tokens
// (missing spaces) that would not compile.
static inline int get_cgroup_v2_idx(__u32 *cgrps, int size)
{
	register int i = 0;
	__u32 *elem;
	int cnt;

	for (cnt = 0; i < MAX_LEVELS; i++) {
		__u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i);

		// id 0 means there is no ancestor at this depth
		if (cgrp_id == 0)
			break;

		// convert cgroup-id to a map index
		elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
		if (!elem)
			continue;

		cgrps[cnt++] = *elem;
		if (cnt == size)
			break;
	}

	return cnt;
}
staticint bperf_cgroup_count(void)
{ register __u32 idx = 0; // to have it in a register to pass BPF verifier registerint c = 0; struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val;
__u32 cpu = bpf_get_smp_processor_id();
__u32 cgrp_idx[MAX_LEVELS]; int cgrp_cnt;
__u32 key, cgrp; long err;
for (c = 0; c < MAX_LEVELS; c++) { if (c == cgrp_cnt) break;
cgrp = cgrp_idx[c];
// aggregate the result by cgroup
key = cgrp * num_events + idx;
cgrp_val = bpf_map_lookup_elem(&cgrp_readings, &key); if (cgrp_val) {
cgrp_val->counter += delta.counter;
cgrp_val->enabled += delta.enabled;
cgrp_val->running += delta.running;
} else {
bpf_map_update_elem(&cgrp_readings, &key,
&delta, BPF_ANY);
}
}
}
*prev_val = val;
} return 0;
}
// This will be attached to cgroup-switches event for each cpu
SEC("perf_event")
int BPF_PROG(on_cgrp_switch)
{
	return bperf_cgroup_count();
}
// Attached to the sched_switch raw tracepoint; simply triggers another
// counter read/aggregation pass on this cpu.
SEC("raw_tp/sched_switch")
int BPF_PROG(trigger_read)
{
	return bperf_cgroup_count();
}
// License declaration required by the kernel to allow use of the BPF
// helpers called above.
char LICENSE[] SEC("license") = "Dual BSD/GPL";
Messung V0.5
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.