if (android) { if (replace_android_lib(filename, newfilename))
filename = newfilename;
}
if (vdso) { /* The vdso maps are always on the host and not the * container. Ensure that we don't use setns to look * them up.
*/
nnsi = nsinfo__copy(nsi); if (nnsi) {
nsinfo__put(nsi);
nsinfo__clear_need_setns(nnsi);
nsi = nnsi;
}
pgoff = 0;
dso = machine__findnew_vdso(machine, thread);
} else
dso = machine__findnew_dso_id(machine, filename, id);
if (anon || no_dso) {
map->mapping_type = MAPPING_TYPE__IDENTITY;
/* * Set memory without DSO as loaded. All map__find_* * functions still return NULL, and we avoid the * unnecessary map__load warning.
*/ if (!(prot & PROT_EXEC))
dso__set_loaded(dso);
}
mutex_lock(dso__lock(dso));
dso__set_nsinfo(dso, nsi);
mutex_unlock(dso__lock(dso));
if (!build_id__is_defined(&id->build_id)) { /* * If the mmap event had no build ID, search for an existing dso from the * build ID header by name. Otherwise only the dso loaded at the time of * reading the header will have the build ID set and all future mmaps will * have it missing.
*/ struct dso *header_bid_dso = dsos__find(&machine->dsos, filename, false);
/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *result;
	RC_STRUCT(map) *map;

	/* Kernel DSOs get a trailing struct kmap for kmaps bookkeeping. */
	map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0));
	if (ADD_RC_CHK(result, map)) {
		/* ->end will be filled after we load all the symbols. */
		map__init(result, start, /*end=*/0, /*pgoff=*/0, dso, /*prot=*/0, /*flags=*/0);
	}

	/*
	 * NOTE(review): the trailing 'return result;' was missing from the
	 * garbled source; reconstructed (result is NULL when calloc failed).
	 */
	return result;
}
if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) returntrue;
/* * If PERF_RECORD_BPF_EVENT is not included, the dso will not have * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can * guess the type based on name.
*/
name = dso__short_name(dso); return name && (strstr(name, "bpf_prog_") == name);
}
if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) returntrue;
/* * If PERF_RECORD_KSYMBOL is not included, the dso will not have * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can * guess the type based on name.
*/
name = dso__short_name(dso); return name && is_bpf_image(name);
}
if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map__set_end(map, sym->end);
}
}
#define DSO__DELETED "(deleted)"
/*
 * Load the symbols of the map's DSO if they aren't loaded yet.
 *
 * Returns 0 on success (or if already loaded), -1 when the DSO could not
 * be opened or contained no symbols. Failure messages go to pr_debug so
 * the tool can continue without symbols.
 */
int map__load(struct map *map)
{
	struct dso *dso = map__dso(map);
	const char *name = dso__long_name(dso);
	int nr;

	if (dso__loaded(dso))
		return 0;

	nr = dso__load(dso, map);
	if (nr < 0) {
		if (dso__has_build_id(dso)) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		/* Completes the message started in either branch above. */
		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		/*
		 * Offset of the " (deleted)" suffix; only meaningful (and only
		 * used) when len > sizeof(DSO__DELETED) below — unsigned wrap
		 * for shorter names is harmless.
		 */
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				 "Restart the long running apps that use it!\n",
				 (int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n",
				 name);
		}
#endif
		return -1;
	}

	return 0;
}
/*
 * Look up the symbol covering @addr (a map-relative address) in the map's
 * DSO, loading the DSO's symbols first if needed. Returns NULL when the
 * symbols cannot be loaded.
 *
 * NOTE(review): the function tail was truncated in the garbled source; the
 * dso__find_symbol() return below is reconstructed from upstream perf —
 * confirm against the project tree.
 */
struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map__dso(map), addr);
}
/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);
	const struct dso *dso = map__dso(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) {
		struct machine *machine = maps__machine(kmap->kmaps);

		if (machine) {
			struct map *kernel_map = machine__kernel_map(machine);

			if (kernel_map)
				map = kernel_map;
		}
	}

	if (!dso__adjust_symbols(dso))
		return rip;

	if (dso__rel(dso))
		return rip - map__pgoff(map);

	if (dso__kernel(dso) == DSO_SPACE__USER)
		return rip + dso__text_offset(dso);

	/*
	 * NOTE(review): the function tail was truncated in the garbled source;
	 * the final return below is reconstructed from upstream perf — confirm
	 * against the project tree.
	 */
	return map__map_ip(map, rip);
}
/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map__map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	const struct dso *dso = map__dso(map);

	if (!dso__adjust_symbols(dso))
		return map__unmap_ip(map, ip);

	if (dso__rel(dso))
		return map__unmap_ip(map, ip + map__pgoff(map));

	if (dso__kernel(dso) == DSO_SPACE__USER)
		return map__unmap_ip(map, ip - dso__text_offset(dso));

	/*
	 * NOTE(review): the function tail was truncated in the garbled source;
	 * the final return below is reconstructed from upstream perf — confirm
	 * against the project tree.
	 */
	return ip + map__reloc(map);
}
/*
 * NOTE(review): the following is extraneous website boilerplate that leaked
 * into the file during extraction (translated from German, preserved here
 * as a comment so the file remains compilable):
 * "The information on this website was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the information provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */