long ksym_get_addr_local(struct ksyms *ksyms, constchar *name)
{ int i;
for (i = 0; i < ksyms->sym_cnt; i++) { if (strcmp(ksyms->syms[i].name, name) == 0) return ksyms->syms[i].addr;
}
return 0;
}
long ksym_get_addr(constchar *name)
{ if (!ksyms) return 0; return ksym_get_addr_local(ksyms, name);
}
/* Open /proc/kallsyms and scan symbol addresses on the fly. Without
 * caching all symbols, this is faster than load + find.
 *
 * On success stores the symbol's address in *addr and returns 0;
 * returns -EINVAL if kallsyms cannot be opened, -ENOENT if the
 * symbol is not found.
 */
int kallsyms_find(const char *sym, unsigned long long *addr)
{
	char type, name[500], *match;
	unsigned long long value;
	int err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return -EINVAL;

	/* %499s bounds the read to the buffer; %*[^\n] discards the rest
	 * of the line (e.g. a trailing [module] annotation).
	 */
	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
		/* If CONFIG_LTO_CLANG_THIN is enabled, static variable/function
		 * symbols could be promoted to global due to cross-file inlining.
		 * For such cases, clang compiler will add .llvm.<hash> suffix
		 * to those symbols to avoid potential naming conflict.
		 * Let us ignore .llvm.<hash> suffix during symbol comparison.
		 */
		if (type == 'd') {
			match = strstr(name, ".llvm.");
			if (match)
				*match = '\0';
		}
		if (strcmp(name, sym) == 0) {
			*addr = value;
			goto out;
		}
	}
	err = -ENOENT;
out:
	fclose(f);
	return err;
}
#ifdef PROCMAP_QUERY int env_verbosity __weak = 0;
/* * A PPC64 ABIv2 function may have a local and a global entry * point. We need to use the local entry point when patching * functions, so identify and step over the global entry point * sequence. * * The global entry point sequence is always of the form: * * addis r2,r12,XXXX * addi r2,r2,XXXX * * A linker optimisation may convert the addis to lis: * * lis r2,XXXX * addi r2,r2,XXXX
*/
{ const __u32 *insn = (const __u32 *)(uintptr_t)addr;
/* Reads binary from *path* file and returns it in the *build_id* buffer * with *size* which is expected to be at least BPF_BUILD_ID_SIZE bytes. * Returns size of build id on success. On error the error value is * returned.
*/ int read_build_id(constchar *path, char *build_id, size_t size)
{ int fd, err = -EINVAL;
Elf *elf = NULL;
GElf_Ehdr ehdr;
size_t max, i;
/* Return true if a line from available_filter_functions should be
 * rejected: when scanning for kernel symbols, entries carrying a
 * '[' (a "[module]" annotation) are invalid; when scanning for
 * module symbols, entries without one are invalid.
 */
static bool is_invalid_entry(char *buf, bool kernel)
{
	if (kernel && strchr(buf, '['))
		return true;
	if (!kernel && !strchr(buf, '['))
		return true;
	return false;
}
/* Return true for symbols that must not be traced.
 *
 * We attach to almost all kernel functions and some of them
 * will cause 'suspicious RCU usage' when fprobe is attached
 * to them. Filter out the current culprits - arch_cpu_idle
 * default_idle and rcu_* functions. Also skip the XDP dispatcher
 * trampoline and placeholder "__ftrace_invalid_address__*" entries.
 */
static bool skip_entry(char *name)
{
	if (!strcmp(name, "arch_cpu_idle"))
		return true;
	if (!strcmp(name, "default_idle"))
		return true;
	if (!strncmp(name, "rcu_", 4))
		return true;
	if (!strcmp(name, "bpf_dispatcher_xdp_func"))
		return true;
	if (!strncmp(name, "__ftrace_invalid_address__",
		     sizeof("__ftrace_invalid_address__") - 1))
		return true;
	return false;
}
/* Do comparison by ignoring '.llvm.<hash>' suffixes. */ staticint compare_name(constchar *name1, constchar *name2)
{ constchar *res1, *res2; int len1, len2;
ksyms = load_kallsyms_custom_local(load_kallsyms_compare); if (!ksyms) return -EINVAL;
/* * The available_filter_functions contains many duplicates, * but other than that all symbols are usable to trace. * Filtering out duplicates by using hashmap__add, which won't * add existing entry.
*/
if (access("/sys/kernel/tracing/trace", F_OK) == 0)
f = fopen("/sys/kernel/tracing/available_filter_functions", "r"); else
f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
if (access("/sys/kernel/tracing/trace", F_OK) == 0)
f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r"); else
f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
if (!f) return -ENOENT;
/* In my local setup, the number of entries is 50k+ so Let us initially * allocate space to hold 64k entries. If 64k is not enough, incrementally * increase 1k each time.
*/
max_cnt = 65536;
inc_cnt = 1024;
addrs = malloc(max_cnt * sizeof(long)); if (addrs == NULL) {
err = -ENOMEM; goto error;
}
while (fgets(buf, sizeof(buf), f)) { if (is_invalid_entry(buf, kernel)) continue;
free(name); if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2) continue; if (skip_entry(name)) continue;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.