/* Layout of one stack-trace sample submitted through the perf buffer by
 * the BPF side of the raw-tracepoint get_stack test.  The three *_size
 * fields are byte counts describing how much of each trailing array the
 * BPF program actually filled in.
 */
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
/* Perf-buffer sample callback for the get_stack raw-tracepoint test.
 * Validates that the kernel stack trace in the sample looks sane (and,
 * for full-sized samples, that a user stack with build-id data is
 * present), reporting a test failure via CHECK() otherwise.
 *
 * @ctx:  unused callback context
 * @cpu:  CPU the sample was collected on (unused)
 * @data: raw sample payload from the perf buffer
 * @size: payload size in bytes
 */
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	/* perfbuf-submitted data is 4-byte aligned, but we need 8-byte
	 * alignment, so copy data into a local variable, for simplicity
	 */
	struct get_stack_trace_t e;
	int i, num_stack;
	struct ksym *ks;

	if (size < sizeof(struct get_stack_trace_t)) {
		/* Truncated sample: treat the payload as a bare array of
		 * kernel instruction pointers.
		 */
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (env.jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		/* Full sample: copy it into the 8-byte-aligned local before
		 * reading any field.  (Previously 'e' was read without ever
		 * being initialized — undefined behavior.)  size >= sizeof(e)
		 * here, so the copy stays within the payload.
		 */
		memcpy(&e, data, sizeof(e));

		num_stack = e.kern_stack_size / sizeof(__u64);
		if (env.jit_enabled) {
			/* Same JIT caveat as above: non-empty is "good". */
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e.kern_stack[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0)
			good_user_stack = true;
	}

	if (!good_kern_stack)
		CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
	if (!good_user_stack)
		CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.