/*
 * Default (weak) tie-breaker between two symbols at the same address:
 * de-prioritize kernel syscall aliases ("SyS_*" / "compat_SyS_*") in
 * favor of the other symbol; otherwise prefer the first one.
 */
int __weak arch__choose_best_symbol(struct symbol *syma, struct symbol *symb __maybe_unused)
{
	const char *name = syma->name;

	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(name) >= 3 && strncmp(name, "SyS", 3) == 0)
		return SYMBOL_B;
	if (strlen(name) >= 10 && strncmp(name, "compat_SyS", 10) == 0)
		return SYMBOL_B;

	return SYMBOL_A;
}
/*
 * choose_best_symbol(): heuristic ranking between two symbols mapped to
 * the same address; returns SYMBOL_A or SYMBOL_B. Preference order, as
 * visible below: non-zero size, known ELF type, non-weak binding, global
 * binding, fewer leading underscores, longer name.
 *
 * NOTE(review): this chunk looks truncated by extraction - the fused
 * tokens ("staticint", "elseif") and the missing final fallback /
 * closing brace suggest lost whitespace and lines; code is left
 * byte-identical here.
 */
staticint choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
s64 a;
s64 b;
size_t na, nb;
/* Prefer a symbol with non zero length */
a = syma->end - syma->start;
b = symb->end - symb->start; if ((b == 0) && (a > 0)) return SYMBOL_A; elseif ((a == 0) && (b > 0)) return SYMBOL_B;
/* Prefer a symbol with a known ELF type over STT_NOTYPE */
if (syma->type != symb->type) { if (syma->type == STT_NOTYPE) return SYMBOL_B; if (symb->type == STT_NOTYPE) return SYMBOL_A;
}
/* Prefer a non weak symbol over a weak one */
a = syma->binding == STB_WEAK;
b = symb->binding == STB_WEAK; if (b && !a) return SYMBOL_A; if (a && !b) return SYMBOL_B;
/* Prefer a global symbol over a non global one */
a = syma->binding == STB_GLOBAL;
b = symb->binding == STB_GLOBAL; if (a && !b) return SYMBOL_A; if (b && !a) return SYMBOL_B;
/* Prefer a symbol with less underscores */
a = prefix_underscores_count(syma->name);
b = prefix_underscores_count(symb->name); if (b > a) return SYMBOL_A; elseif (a > b) return SYMBOL_B;
/* Choose the symbol with the longest name */
na = strlen(syma->name);
nb = strlen(symb->name); if (na > nb) return SYMBOL_A; elseif (na < nb) return SYMBOL_B;
/* Update zero-sized symbols using the address of the next symbol */ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{ struct rb_node *nd, *prevnd = rb_first_cached(symbols); struct symbol *curr, *prev;
/* * On some architecture kernel text segment start is located at * some low memory address, while modules are located at high * memory addresses (or vice versa). The gap between end of * kernel text segment and beginning of first module's text * segment is very big. Therefore do not fill this gap and do * not assign it to the kernel dso map (kallsyms). * * Also BPF code can be allocated separately from text segments * and modules. So the last entry in a module should not fill * the gap too. * * In kallsyms, it determines module symbols using '[' character * like in: * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
*/ if (prev->end == prev->start) { constchar *prev_mod; constchar *curr_mod;
if (!is_kallsyms) {
prev->end = curr->start; continue;
}
/* Last kernel/module symbol mapped to end of page */ if (!prev_mod != !curr_mod)
prev->end = roundup(prev->end + 4096, 4096); /* Last symbol in the previous module */ elseif (prev_mod && strcmp(prev_mod, curr_mod))
prev->end = roundup(prev->end + 4096, 4096); else
prev->end = curr->start;
if (kernel) { constchar *name = sym->name; /* * ppc64 uses function descriptors and appends a '.' to the * start of every instruction address. Remove it.
*/ if (name[0] == '.')
name++;
sym->idle = symbol__is_idle(name);
}
while (*p != NULL) {
parent = *p;
s = rb_entry(parent, struct symbol, rb_node); if (ip < s->start)
p = &(*p)->rb_left; else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&sym->rb_node, parent, p);
rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
if (n) return rb_entry(n, struct symbol, rb_node);
return NULL;
}
staticstruct symbol *symbols__next(struct symbol *sym)
{ struct rb_node *n = rb_next(&sym->rb_node);
if (n) return rb_entry(n, struct symbol, rb_node);
return NULL;
}
staticint symbols__sort_name_cmp(constvoid *vlhs, constvoid *vrhs)
{ conststruct symbol *lhs = *((conststruct symbol **)vlhs); conststruct symbol *rhs = *((conststruct symbol **)vrhs);
return strcmp(lhs->name, rhs->name);
}
staticstruct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
{ struct rb_node *nd; struct symbol **result;
size_t i = 0, size = 0;
for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
size++;
result = malloc(sizeof(*result) * size); if (!result) return NULL;
for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
if (cmp > 0)
upper = i; elseif (cmp < 0)
lower = i + 1; else { if (found_idx)
*found_idx = i;
s = symbols[i]; break;
}
} if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) { /* return first symbol that has same name (if any) */ for (; i > 0; i--) { struct symbol *tmp = symbols[i - 1];
if (!arch__compare_symbol_names(tmp->name, s->name)) { if (found_idx)
*found_idx = i - 1;
s = tmp;
} else break;
}
}
assert(!found_idx || !s || s == symbols[*found_idx]); return s;
}
/* Ignore local symbols for ARM modules */ if (name[0] == '$') return 0;
/* * module symbols are not sorted so we add all * symbols, setting length to 0, and rely on * symbols__fixup_end() to fix it up.
*/
sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name); if (sym == NULL) return -ENOMEM; /* * We will pass the symbols to the filter later, in * map__split_kallsyms, when we have split the maps per module
*/
__symbols__insert(root, sym, !strchr(name, '['));
return 0;
}
/* * Loads the function entries in /proc/kallsyms into kernel_map->dso, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms.
*/ staticint dso__load_all_kallsyms(struct dso *dso, constchar *filename)
{ return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}
/* Symbols have been adjusted */
dso__set_adjust_symbols(dso, true);
return count;
}
/* * Split the symbols into maps, making sure there are no overlaps, i.e. the * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have.
*/ staticint maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, struct map *initial_map)
{ struct machine *machine; struct map *curr_map = map__get(initial_map); struct symbol *pos; int count = 0, moved = 0; struct rb_root_cached *root = dso__symbols(dso); struct rb_node *next = rb_first_cached(root); int kernel_range = 0; bool x86_64;
if (!kmaps) return -1;
machine = maps__machine(kmaps);
x86_64 = machine__is(machine, "x86_64");
while (next) { char *module;
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
module = strchr(pos->name, '\t'); if (module) { struct dso *curr_map_dso;
if (!symbol_conf.use_modules) goto discard_symbol;
*module++ = '\0';
curr_map_dso = map__dso(curr_map); if (strcmp(dso__short_name(curr_map_dso), module)) { if (!RC_CHK_EQUAL(curr_map, initial_map) &&
dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(machine)) { /* * We assume all symbols of a module are * continuous in * kallsyms, so curr_map * points to a module and all its * symbols are in its kmap. Mark it as * loaded.
*/
dso__set_loaded(curr_map_dso);
}
map__zput(curr_map);
curr_map = maps__find_by_name(kmaps, module); if (curr_map == NULL) {
pr_debug("%s/proc/{kallsyms,modules} " "inconsistency while looking " "for \"%s\" module!\n",
machine->root_dir, module);
curr_map = map__get(initial_map); goto discard_symbol;
}
curr_map_dso = map__dso(curr_map); if (dso__loaded(curr_map_dso) &&
!machine__is_default_guest(machine)) goto discard_symbol;
} /* * So that we look just like we get from .ko files, * i.e. not prelinked, relative to initial_map->start.
*/
pos->start = map__map_ip(curr_map, pos->start);
pos->end = map__map_ip(curr_map, pos->end);
} elseif (x86_64 && is_entry_trampoline(pos->name)) { /* * These symbols are not needed anymore since the * trampoline maps refer to the text section and it's * symbols instead. Avoid having to deal with * relocations, and the assumption that the first symbol * is the start of kernel text, by simply removing the * symbols at this point.
*/ goto discard_symbol;
} elseif (!RC_CHK_EQUAL(curr_map, initial_map)) { char dso_name[PATH_MAX]; struct dso *ndso;
if (delta) { /* Kernel was relocated at boot time */
pos->start -= delta;
pos->end -= delta;
}
dso = map__dso(old_map); /* Module must be in memory at the same address */
mi = find_module(dso__short_name(dso), modules); if (!mi || mi->start != map__start(old_map)) return -EINVAL;
/* * If kallsyms is referenced by name then we look for filename in the same * directory.
*/ staticbool filename_from_kallsyms_filename(char *filename, constchar *base_name, constchar *kallsyms_filename)
{ char *name;
strcpy(filename, kallsyms_filename);
name = strrchr(filename, '/'); if (!name) returnfalse;
name += 1;
if (!strcmp(name, "kallsyms")) {
strcpy(name, base_name); returntrue;
}
/* * We need to preserve eBPF maps even if they are covered by kcore, * because we need to access eBPF dso for source data.
*/ return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}
/* Find the kernel map using the '_stext' symbol */ if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
u64 replacement_size = 0; struct map_list_node *new_node;
if (!(stext >= map__start(new_map) && stext < map__end(new_map))) continue;
/* * On some architectures, ARM64 for example, the kernel * text can get allocated inside of the vmalloc segment. * Select the smallest matching segment, in case stext * falls within more than one in the list.
*/ if (!replacement_map || new_size < replacement_size) {
replacement_map = new_map;
replacement_size = new_size;
}
}
}
if (!replacement_map)
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
/* * Update addresses of vmlinux map. Re-insert it to ensure maps are * correctly ordered. Do this before using maps__merge_in() for the * remaining maps so vmlinux gets split if necessary.
*/
map_ref = map__get(map);
maps__remove(kmaps, map_ref);
/* skip if replacement_map, already inserted above */ if (!RC_CHK_EQUAL(new_map, replacement_map)) { /* * Merge kcore map into existing maps, * and ensure that current maps (eBPF) * stay intact.
*/ if (maps__merge_in(kmaps, new_map)) {
err = -EINVAL; goto out_err;
}
}
map__zput(new_node->map);
free(new_node);
}
if (machine__is(machine, "x86_64")) {
u64 addr;
/* * If one of the corresponding symbols is there, assume the * entry trampoline maps are too.
*/ if (!kallsyms__get_function_start(kallsyms_filename,
ENTRY_TRAMPOLINE_NAME,
&addr))
machine->trampolines_mapped = true;
}
/* * Set the data type and long name so that kcore can be read via * dso__data_read_addr().
*/ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE); else
dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE);
dso__set_long_name(dso, strdup(kcore_filename), true);
close(fd);
if (map__prot(map) & PROT_EXEC)
pr_debug("Using %s for kernel object code\n", kcore_filename); else
pr_debug("Using %s for kernel data\n", kcore_filename);
return 0;
out_err: while (!list_empty(&md.maps)) { struct map_list_node *list_node;
/* * If the kernel is relocated at boot time, kallsyms won't match. Compute the * delta based on the relocation reference symbol.
*/ staticint kallsyms__delta(struct kmap *kmap, constchar *filename, u64 *delta)
{
u64 addr;
if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) return 0;
if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr)) return -1;
staticbool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, enum dso_binary_type type)
{ switch (type) { case DSO_BINARY_TYPE__JAVA_JIT: case DSO_BINARY_TYPE__DEBUGLINK: case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: case DSO_BINARY_TYPE__GNU_DEBUGDATA: return !kmod && dso__kernel(dso) == DSO_SPACE__USER;
case DSO_BINARY_TYPE__KALLSYMS: case DSO_BINARY_TYPE__VMLINUX: case DSO_BINARY_TYPE__KCORE: return dso__kernel(dso) == DSO_SPACE__KERNEL;
case DSO_BINARY_TYPE__GUEST_KALLSYMS: case DSO_BINARY_TYPE__GUEST_VMLINUX: case DSO_BINARY_TYPE__GUEST_KCORE: return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST;
case DSO_BINARY_TYPE__GUEST_KMODULE: case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: /* * kernel modules know their symtab type - it's set when * creating a module dso in machine__addnew_module_map().
*/ return kmod && dso__symtab_type(dso) == type;
case DSO_BINARY_TYPE__BUILD_ID_CACHE: case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: returntrue;
case DSO_BINARY_TYPE__BPF_PROG_INFO: case DSO_BINARY_TYPE__BPF_IMAGE: case DSO_BINARY_TYPE__OOL: case DSO_BINARY_TYPE__NOT_FOUND: default: returnfalse;
}
}
/* Checks for the existence of the perf-<pid>.map file in two different * locations. First, if the process is a separate mount namespace, check in * that namespace using the pid of the innermost pid namespace. If's not in a * namespace, or the file can't be found there, try in the mount namespace of * the tracing process using our view of its pid.
*/ staticint dso__find_perf_map(char *filebuf, size_t bufsz, struct nsinfo **nsip)
{ struct nscookie nsc; struct nsinfo *nsi; struct nsinfo *nnsi; int rc = -1;
if (perfmap) {
ret = dso__load_perf_map(map_path, dso);
dso__set_symtab_type(dso, ret > 0
? DSO_BINARY_TYPE__JAVA_JIT
: DSO_BINARY_TYPE__NOT_FOUND); goto out;
}
if (machine)
root_dir = machine->root_dir;
name = malloc(PATH_MAX); if (!name) goto out;
/* * Read the build id if possible. This is required for * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work. Don't block in case path * isn't for a regular file.
*/ if (!dso__has_build_id(dso)) { struct build_id bid = { .size = 0, };
if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) { if (vmlinux_allocated)
free((char *) vmlinux); return -1;
}
/* * dso__load_sym() may copy 'dso' which will result in the copies having * an incorrect long name unless we set it here first.
*/
dso__set_long_name(dso, vmlinux, vmlinux_allocated); if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX); else
dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX);
/*
 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
 * since access(R_OK) only checks with real UID/GID but open() uses effective
 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
 *
 * Returns true iff @file can currently be opened for reading.
 *
 * Fix: fused tokens ("staticbool", "constchar", "returnfalse",
 * "returntrue") restored to valid C.
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);

	if (fd < 0)
		return false;
	close(fd);
	return true;
}
if (!dso__has_build_id(dso)) { /* * Last resort, if we don't have a build-id and couldn't find * any vmlinux file, try the running kernel kallsyms table.
*/ goto proc_kallsyms;
}
if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
is_host = dso__build_id_equal(dso, &bid);
/* Try a fast path for /proc/kallsyms if possible */ if (is_host) { /* * Do not check the build-id cache, unless we know we cannot use * /proc/kcore or module maps don't match to /proc/kallsyms. * To check readability of /proc/kcore, do not use access(R_OK) * since /proc/kcore requires CAP_SYS_RAWIO to read and access * can't check it.
*/ if (filename__readable("/proc/kcore") &&
!validate_kcore_addresses("/proc/kallsyms", map)) goto proc_kallsyms;
}
/* Find kallsyms in build-id cache with kcore */
scnprintf(path, sizeof(path), "%s/%s/%s",
buildid_dir, DSO__NAME_KCORE, sbuild_id);
if (!find_matching_kcore(map, path, sizeof(path))) return strdup(path);
/* Use current /proc/kallsyms if possible */ if (is_host) {
proc_kallsyms: return strdup("/proc/kallsyms");
}
/* Finally, find a cache of kallsyms */ if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
pr_err("No kallsyms or vmlinux with build-id %s was found\n",
sbuild_id); return NULL;
}
/* * Step 1: if the user specified a kallsyms or vmlinux filename, use * it and only it, reporting errors to the user if it cannot be used. * * For instance, try to analyse an ARM perf.data file _without_ a * build-id, or if the user specifies the wrong path to the right * vmlinux file, obviously we can't fallback to another vmlinux (a * x86_86 one, on the machine where analysis is being performed, say), * or worse, /proc/kallsyms. * * If the specified file _has_ a build-id and there is a build-id * section in the perf.data file, we will still do the expected * validation in dso__load_vmlinux and will bail out if they don't * match.
*/ if (symbol_conf.kallsyms_name != NULL) {
kallsyms_filename = symbol_conf.kallsyms_name; goto do_kallsyms;
}
/* * Before checking on common vmlinux locations, check if it's * stored as standard build id binary (not kallsyms) under * .debug cache.
*/ if (!symbol_conf.ignore_vmlinux_buildid)
filename = __dso__build_id_filename(dso, NULL, 0, false, false); if (filename != NULL) {
err = dso__load_vmlinux(dso, map, filename, true); if (err > 0) return err;
}
if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map); if (err > 0) return err;
}
/* do not try local files if a symfs was given */ if (symbol_conf.symfs[0] != 0) return -1;
kallsyms_allocated_filename = dso__find_kallsyms(dso, map); if (!kallsyms_allocated_filename) return -1;
kallsyms_filename = kallsyms_allocated_filename;
do_kallsyms:
err = dso__load_kallsyms(dso, kallsyms_filename, map); if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
free(kallsyms_allocated_filename);
if (machine->kallsyms_filename) {
kallsyms_filename = machine->kallsyms_filename;
} elseif (machine__is_default_guest(machine)) { /* * if the user specified a vmlinux filename, use it and only * it, reporting errors to the user if it cannot be used. * Or use file guest_kallsyms inputted by user on commandline
*/ if (symbol_conf.default_guest_vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
symbol_conf.default_guest_vmlinux_name, false); return err;
}
for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++) if (vmlinux_path__add(vmlinux_paths[i]) < 0) goto out_fail;
/* only try kernel version if no symfs was given */ if (symbol_conf.symfs[0] != 0) return 0;
if (env) {
kernel_version = env->os_release;
} else { if (uname(&uts) < 0) goto out_fail;
kernel_version = uts.release;
}
for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version); if (vmlinux_path__add(bf) < 0) goto out_fail;
}
return 0;
out_fail:
vmlinux_path__exit(); return -1;
}
int setup_list(struct strlist **list, constchar *list_str, constchar *list_name)
{ if (list_str == NULL) return 0;
if (fgets(line, sizeof(line), fp) != NULL)
value = cap_syslog ? (atoi(line) >= 2) : (atoi(line) != 0);
fclose(fp);
}
/* Per kernel/kallsyms.c: * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
*/ if (perf_event_paranoid() > 1 && !cap_syslog)
value = true;
return value;
}
int symbol__annotation_init(void)
{ if (symbol_conf.init_annotation) return 0;
if (symbol_conf.initialized) {
pr_err("Annotation needs to be init before symbol__init()\n"); return -1;
}
if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0) return -1;
if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
pr_err("'.' is the only non valid --field-separator argument\n"); return -1;
}
if (setup_parallelism_bitmap()) return -1;
if (setup_list(&symbol_conf.dso_list,
symbol_conf.dso_list_str, "dso") < 0) return -1;
if (setup_list(&symbol_conf.comm_list,
symbol_conf.comm_list_str, "comm") < 0) goto out_free_dso_list;
if (setup_intlist(&symbol_conf.pid_list,
symbol_conf.pid_list_str, "pid") < 0) goto out_free_comm_list;
if (setup_intlist(&symbol_conf.tid_list,
symbol_conf.tid_list_str, "tid") < 0) goto out_free_pid_list;
if (setup_list(&symbol_conf.sym_list,
symbol_conf.sym_list_str, "symbol") < 0) goto out_free_tid_list;
if (symbol_conf.sym_list &&
setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0) goto out_free_sym_list;
if (setup_list(&symbol_conf.bt_stop_list,
symbol_conf.bt_stop_list_str, "symbol") < 0) goto out_free_sym_list;
/* * A path to symbols of "/" is identical to "" * reset here for simplicity.
*/
symfs = realpath(symbol_conf.symfs, NULL); if (symfs == NULL)
symfs = symbol_conf.symfs; if (strcmp(symfs, "/") == 0)
symbol_conf.symfs = ""; if (symfs != symbol_conf.symfs)
free((void *)symfs);
/*
 * symbol__config_symfs - option callback: record @dir as
 * symbol_conf.symfs and point the build-id cache at "<dir>/.debug".
 *
 * Returns 0 on success, -ENOMEM if strdup() or asprintf() fails.
 *
 * Fix: fused tokens ("conststruct", "constchar") restored to valid C.
 */
int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/*
	 * Skip the locally configured cache if a symfs is given, and
	 * config buildid dir to symfs/.debug.
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);
	free(bf);
	return 0;
}
/*
 * Checks that user supplied symbol kernel files are accessible because
 * the default mechanism for accessing elf files fails silently. i.e. if
 * debug syms for a build ID aren't found perf carries on normally. When
 * they are user supplied we should assume that the user doesn't want to
 * silently fail.
 *
 * Returns 0 when both (optional) paths are readable, -EINVAL otherwise.
 */
int symbol__validate_sym_arguments(void)
{
	if (symbol_conf.vmlinux_name &&
	    access(symbol_conf.vmlinux_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
		return -EINVAL;
	}
	if (symbol_conf.kallsyms_name &&
	    access(symbol_conf.kallsyms_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
		return -EINVAL;
	}
	return 0;
}
/* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access * to it...
*/ if (!want_demangle((dso && dso__kernel(dso)) || kmodule)) return demangled;
rust_demangle_demangle(elf_name, &rust_demangle); if (rust_demangle_is_known(&rust_demangle)) { /* A rust mangled name. */ if (rust_demangle.mangled_len == 0) return demangled;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.